diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4aaf4895b1..87aaac74db 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -57,8 +57,8 @@ jobs: fail-fast: false matrix: kubernetes: - - v1.25.16 - - v1.28.6 + - v1.27.11 + - v1.29.2 max-parallel: 2 runs-on: ubuntu-latest steps: @@ -73,7 +73,7 @@ jobs: - name: Install Ko uses: ko-build/setup-ko@v0.6 with: - version: v0.15.1 + version: v0.15.2 - name: Install kubectl uses: azure/setup-kubectl@v3 with: @@ -81,7 +81,7 @@ jobs: - name: Create kind cluster uses: helm/kind-action@v1 with: - version: v0.21.0 + version: v0.22.0 node_image: kindest/node:${{ matrix.kubernetes }} cluster_name: kind wait: 120s @@ -125,8 +125,8 @@ jobs: fail-fast: false matrix: kubernetes: - - v1.25.16 - - v1.28.6 + - v1.27.11 + - v1.29.2 max-parallel: 2 runs-on: ubuntu-latest steps: @@ -155,7 +155,7 @@ jobs: - name: Create kind cluster uses: helm/kind-action@v1 with: - version: v0.21.0 + version: v0.22.0 node_image: kindest/node:${{ matrix.kubernetes }} cluster_name: kind config: test/kind/config.yaml @@ -191,7 +191,7 @@ jobs: - name: Install Ko uses: ko-build/setup-ko@v0.6 with: - version: v0.15.1 + version: v0.15.2 - name: Install Shipwright Build run: | make install-controller-kind diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml index 15f8b9f673..03f3b9789f 100644 --- a/.github/workflows/nightly.yaml +++ b/.github/workflows/nightly.yaml @@ -29,7 +29,7 @@ jobs: # Install tools - uses: ko-build/setup-ko@v0.6 with: - version: v0.15.1 + version: v0.15.2 - uses: imjasonh/setup-crane@00c9e93efa4e1138c9a7a5c594acd6c75a2fbf0c - uses: sigstore/cosign-installer@v3 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index fb32af719b..89e579e4db 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -37,7 +37,7 @@ jobs: # Install tools - uses: ko-build/setup-ko@v0.6 with: - version: v0.15.1 + version: v0.15.2 - uses: sigstore/cosign-installer@v3 - name: Build Release Changelog diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index f660839b00..e92b7c7e3b 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -90,7 +90,7 @@ You must install these tools: 1. Follow the instructions in the Kubernetes doc to [Set up a kubernetes cluster](https://kubernetes.io/docs/setup/) 1. Set up a container image repository for pushing images. Any container image registry that is accessible to your cluster can be used for your repository. This can be a public registry like [Docker Hub](https://docs.docker.com/docker-hub/), [quay.io](https://quay.io), or a container registry runs by your cloud provider -**Note**: We support Kubernetes version 1.25 to 1.28. 1 cluster worker node for basic usage, 2+ cluster worker nodes for HA +**Note**: We support Kubernetes version 1.27 to 1.29. 1 cluster worker node for basic usage, 2+ cluster worker nodes for HA ## Environment Setup @@ -165,4 +165,3 @@ To look at the controller logs, run: ```sh kubectl -n shipwright-build logs $(kubectl -n shipwright-build get pods -l name=shipwright-build-controller -o name) ``` - diff --git a/HACK.md b/HACK.md index fbe2a66a86..ca7e76d259 100644 --- a/HACK.md +++ b/HACK.md @@ -41,7 +41,7 @@ In the near future, the above would be setup by the controller. make clean && make build ``` -* This project uses Golang 1.21 and controller-gen v0.10.0. +* This project uses Golang 1.21 and controller-gen v0.12.1. * The controllers create/watch Tekton objects. 
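The HACK.md note above states that the controllers create and watch Tekton objects, and this change pins the toolchain (Go 1.21, controller-gen v0.12.1) and, elsewhere in the diff, controller-runtime v0.15.x and tektoncd/pipeline v0.50.x. As a point of reference only, the minimal Go sketch below (illustrative wiring with a hypothetical function name and reconciler, not the project's actual controller setup) shows how a controller-runtime builder can reconcile Shipwright BuildRuns and re-queue them when an owned Tekton TaskRun changes.

```go
// Illustrative sketch only — not the project's actual controller wiring.
// It assumes the dependency versions pinned in this change (controller-runtime
// v0.15.x, tektoncd/pipeline v0.50.x) and a hypothetical reconciler `r`.
package controllers

import (
	buildv1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1"
	pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setupBuildRunController (hypothetical name) registers a controller that
// reconciles BuildRuns and re-queues them whenever an owned TaskRun changes.
func setupBuildRunController(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&buildv1beta1.BuildRun{}).
		Owns(&pipelinev1.TaskRun{}).
		Complete(r)
}
```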
# Testing diff --git a/Makefile b/Makefile index 486273b407..64ad22eee4 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,7 @@ ZAP_FLAGS ?= --zap-log-level=debug --zap-encoder=console TEST_NAMESPACE ?= default # CI: tekton pipelines controller version -TEKTON_VERSION ?= v0.44.0 +TEKTON_VERSION ?= v0.50.5 # E2E test flags TEST_E2E_FLAGS ?= -r -p --randomize-all -timeout=1h -trace -v diff --git a/README.md b/README.md index 9f3ecfdcfa..296cb3bf0f 100644 --- a/README.md +++ b/README.md @@ -36,12 +36,12 @@ Shipwright supports any tool that can build container images in Kubernetes clust ## Try It! -- We assume you already have a Kubernetes cluster (v1.25+). If you don't, you can use [KinD](https://kind.sigs.k8s.io), which you can install by running [`./hack/install-kind.sh`](./hack/install-kind.sh). +- We assume you already have a Kubernetes cluster (v1.27+). If you don't, you can use [KinD](https://kind.sigs.k8s.io), which you can install by running [`./hack/install-kind.sh`](./hack/install-kind.sh). -- We also require a Tekton installation (v0.47.+). To install the newest supported version, run: +- We also require a Tekton installation (v0.50.+). To install it, run: ```bash - kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/previous/v0.47.4/release.yaml + kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/previous/v0.50.5/release.yaml ``` If you are using OpenShift cluster refer [Running on OpenShift](#running-on-openshift) for some more configurations. @@ -189,8 +189,8 @@ To find out more on what's the best strategy or what else can Shipwright do for | Dependency | Supported versions | | -------------------------------------| ---------------------------- | -| [Kubernetes](https://kubernetes.io/) | v1.25.\*, v1.26.\*, v1.27.\*, v1.28.\* | -| [Tekton](https://tekton.dev) | v0.47.\*, v0.50.\* | +| [Kubernetes](https://kubernetes.io/) | v1.27.\*, v1.28.\*, v1.29.\* | +| [Tekton](https://tekton.dev) | v0.50.\*, v0.53.\*, v0.56.\* | ### Platform support diff --git a/deploy/crds/shipwright.io_buildruns.yaml b/deploy/crds/shipwright.io_buildruns.yaml index a8194e2d2f..1725b332e8 100644 --- a/deploy/crds/shipwright.io_buildruns.yaml +++ b/deploy/crds/shipwright.io_buildruns.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.4 + controller-gen.kubebuilder.io/version: v0.12.1 name: buildruns.shipwright.io spec: conversion: @@ -986,7 +986,7 @@ spec: value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. - More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -1213,7 +1213,8 @@ spec: If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -2910,7 +2911,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -3123,8 +3124,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -4996,7 +4997,7 @@ spec: value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. - More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -5223,7 +5224,8 @@ spec: If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -7271,7 +7273,7 @@ spec: be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that - the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -7510,7 +7512,8 @@ spec: a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + Requests cannot exceed Limits. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -9231,7 +9234,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -9444,8 +9447,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -11254,7 +11257,7 @@ spec: value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. - More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -11481,7 +11484,8 @@ spec: If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined - value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: diff --git a/deploy/crds/shipwright.io_builds.yaml b/deploy/crds/shipwright.io_builds.yaml index 2003f317fa..c3ca0df34c 100644 --- a/deploy/crds/shipwright.io_builds.yaml +++ b/deploy/crds/shipwright.io_builds.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.4 + controller-gen.kubebuilder.io/version: v0.12.1 name: builds.shipwright.io spec: conversion: @@ -947,7 +947,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -1160,8 +1160,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -3005,7 +3005,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -3218,8 +3218,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: diff --git a/deploy/crds/shipwright.io_buildstrategies.yaml b/deploy/crds/shipwright.io_buildstrategies.yaml index 0c2c0779bb..7d02130b97 100644 --- a/deploy/crds/shipwright.io_buildstrategies.yaml +++ b/deploy/crds/shipwright.io_buildstrategies.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.4 + controller-gen.kubebuilder.io/version: v0.12.1 name: buildstrategies.shipwright.io spec: conversion: @@ -483,8 +483,6 @@ spec: type: integer grpc: description: GRPC specifies an action involving a GRPC port. - This is a beta field and requires enabling GRPCContainerProbe - feature gate. properties: port: description: Port number of the gRPC service. Number @@ -687,8 +685,6 @@ spec: type: integer grpc: description: GRPC specifies an action involving a GRPC port. - This is a beta field and requires enabling GRPCContainerProbe - feature gate. properties: port: description: Port number of the gRPC service. Number @@ -811,6 +807,26 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -857,8 +873,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object securityContext: @@ -1071,8 +1087,6 @@ spec: type: integer grpc: description: GRPC specifies an action involving a GRPC port. - This is a beta field and requires enabling GRPCContainerProbe - feature gate. properties: port: description: Port number of the gRPC service. 
Number @@ -1749,7 +1763,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -1962,8 +1976,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -3226,8 +3240,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object securityContext: @@ -3845,7 +3859,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -4058,8 +4072,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: diff --git a/deploy/crds/shipwright.io_clusterbuildstrategies.yaml b/deploy/crds/shipwright.io_clusterbuildstrategies.yaml index 1ed12dee0e..40e3e4ee89 100644 --- a/deploy/crds/shipwright.io_clusterbuildstrategies.yaml +++ b/deploy/crds/shipwright.io_clusterbuildstrategies.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.4 + controller-gen.kubebuilder.io/version: v0.12.1 name: clusterbuildstrategies.shipwright.io spec: conversion: @@ -483,8 +483,6 @@ spec: type: integer grpc: description: GRPC specifies an action involving a GRPC port. - This is a beta field and requires enabling GRPCContainerProbe - feature gate. properties: port: description: Port number of the gRPC service. 
Number @@ -687,8 +685,6 @@ spec: type: integer grpc: description: GRPC specifies an action involving a GRPC port. - This is a beta field and requires enabling GRPCContainerProbe - feature gate. properties: port: description: Port number of the gRPC service. Number @@ -811,6 +807,26 @@ spec: format: int32 type: integer type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize + policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource + resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource + is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic resources: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' @@ -857,8 +873,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + otherwise to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object securityContext: @@ -1071,8 +1087,6 @@ spec: type: integer grpc: description: GRPC specifies an action involving a GRPC port. - This is a beta field and requires enabling GRPCContainerProbe - feature gate. properties: port: description: Port number of the gRPC service. Number @@ -1749,7 +1763,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -1962,8 +1976,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: @@ -3226,8 +3240,8 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + otherwise to an implementation-defined value. Requests + cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object securityContext: @@ -3845,7 +3859,7 @@ spec: medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means - that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object @@ -4058,8 +4072,8 @@ spec: amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise - to an implementation-defined value. More info: - https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + to an implementation-defined value. Requests + cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object selector: diff --git a/go.mod b/go.mod index 44bb475d2d..3845448323 100644 --- a/go.mod +++ b/go.mod @@ -15,17 +15,17 @@ require ( github.com/prometheus/client_model v0.6.0 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/tektoncd/pipeline v0.47.4 + github.com/tektoncd/pipeline v0.50.5 go.uber.org/zap v1.27.0 - k8s.io/api v0.26.9 - k8s.io/apiextensions-apiserver v0.26.9 - k8s.io/apimachinery v0.26.9 - k8s.io/client-go v0.26.9 - k8s.io/code-generator v0.26.9 - k8s.io/kubectl v0.26.9 - k8s.io/utils v0.0.0-20230209194617-a36077c30491 - knative.dev/pkg v0.0.0-20230221145627-8efb3485adcf - sigs.k8s.io/controller-runtime v0.14.6 + k8s.io/api v0.27.11 + k8s.io/apiextensions-apiserver v0.27.11 + k8s.io/apimachinery v0.27.11 + k8s.io/client-go v0.27.11 + k8s.io/code-generator v0.27.11 + k8s.io/kubectl v0.27.11 + k8s.io/utils v0.0.0-20230505201702-9f6742963106 + knative.dev/pkg v0.0.0-20231011201526-df28feae6d34 + sigs.k8s.io/controller-runtime v0.15.3 sigs.k8s.io/yaml v1.3.0 ) @@ -46,7 +46,7 @@ require ( github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/docker v24.0.7+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect @@ -55,9 +55,9 @@ require ( github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/zapr v1.2.3 // indirect + github.com/go-logr/zapr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -83,7 +83,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc3 // indirect + 
github.com/opencontainers/image-spec v1.1.0-rc4 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/common v0.48.0 // indirect @@ -108,8 +108,8 @@ require ( golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.16.1 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/api v0.122.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect + google.golang.org/api v0.128.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect @@ -120,10 +120,10 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.26.9 // indirect + k8s.io/component-base v0.27.11 // indirect k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 // indirect - k8s.io/klog/v2 v2.90.1 // indirect - k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index 71948e261e..4277e1d5f0 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,8 @@ github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNk github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -115,7 +115,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= @@ -149,15 +148,15 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV github.com/go-logfmt/logfmt v0.5.1/go.mod 
h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -316,8 +315,8 @@ github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= -github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= +github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -357,8 +356,8 @@ github.com/prometheus/statsd_exporter v0.22.5 h1:BUlQKc9TOw6uTmYgGMgKA4mT3+xSvyg github.com/prometheus/statsd_exporter v0.22.5/go.mod h1:ZRQ6wIdLcUGkVv1JUzypTWEveGNIua59wtFF0ESTJxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 
v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -393,8 +392,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= -github.com/tektoncd/pipeline v0.47.4 h1:MWc/Et/kil7BwJGs1MlJ2sJaijr4USxw7Mp9Q7K21x8= -github.com/tektoncd/pipeline v0.47.4/go.mod h1:7H1DeNuEJFGoExGwQTlRul2IziCPxkjXRdDdirWmoQs= +github.com/tektoncd/pipeline v0.50.5 h1:+wvALTyl/4M9es4K5aiZ5TAeWWoMGXBo4ppMnleQg4Y= +github.com/tektoncd/pipeline v0.50.5/go.mod h1:33ZU30CR8Pbr6Pb4l7+Tz1oPGsJBY5yxyG8Z+ejGO0w= github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= @@ -407,6 +406,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -421,7 +421,7 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -429,7 +429,7 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -476,6 +476,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB 
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= @@ -540,6 +541,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= @@ -635,7 +637,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -664,6 +665,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= @@ -672,8 +674,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= 
-gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= +gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -691,8 +693,8 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.122.0 h1:zDobeejm3E7pEG1mNHvdxvjs5XJoCMzyNH+CmwL94Es= -google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= +google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg= +google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -794,7 +796,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= @@ -806,36 +807,36 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.9 h1:s8Y+G1u2JM55b90+Yo2RVb3PGT/hkWNVPN4idPERxJg= -k8s.io/api v0.26.9/go.mod h1:W/W4fEWRVzPD36820LlVUQfNBiSbiq0VPWRFJKwzmUg= -k8s.io/apiextensions-apiserver v0.26.9 h1:aJqWRuBj9i9J6tIDniqUDYM5QCRajTKXK/GO+zEccGQ= -k8s.io/apiextensions-apiserver v0.26.9/go.mod h1:L1uysxOP2kC1vkZTlHGUlUl5WSpa7e4GHJmGEZY7yLg= -k8s.io/apimachinery v0.26.9 h1:5yAV9cFR7Z4gIorKcAjWnx4uxtxiFsERwq4Pvmx0CCg= -k8s.io/apimachinery v0.26.9/go.mod h1:qYzLkrQ9lhrZRh0jNKo2cfvf/R1/kQONnSiyB7NUJU0= -k8s.io/client-go v0.26.9 h1:TGWi/6guEjIgT0Hg871Gsmx0qFuoGyGFjlFedrk7It0= -k8s.io/client-go v0.26.9/go.mod h1:tU1FZS0bwAmAFyPYpZycUQrQnUMzQ5MHloop7EbX6ow= -k8s.io/code-generator v0.26.9 h1:GiS9v3UE3bpE6Zp3sr9ooNJpZXwscjdyzMMVYWk6UmI= -k8s.io/code-generator v0.26.9/go.mod h1:seNkA/wYpeG1GT1REW1xHk4MCjQcVbx6FFAXKwCgdlE= -k8s.io/component-base v0.26.9 h1:qQVdQgyEIUe8EUkB3EEuQ9l5sgVlG2KgOB519yWEBGw= -k8s.io/component-base v0.26.9/go.mod h1:3WmW9lH9tbjpuvpAc22cPF/6C3VxCjMxkOU1j2mpzr8= +k8s.io/api 
v0.27.11 h1:IsGrWbXt7RkE+arc9GLQPYI5AtZkT+feBMorY+Nzx4I= +k8s.io/api v0.27.11/go.mod h1:SGqTcyqa0e+Db3pgyH6v+por5dO2OdTkKDCdD3Op3Ng= +k8s.io/apiextensions-apiserver v0.27.11 h1:+NZfWpN842yab/O1RRDu8+11eSACA2cPa2VixSxiONM= +k8s.io/apiextensions-apiserver v0.27.11/go.mod h1:BhfSAHizE6MjUiD0FtBxwOakCr8/GLB7Icepd0jk7XU= +k8s.io/apimachinery v0.27.11 h1:ivrKMN7JgdtKhay14S5UQlvilV3z6W+wjiSQTzyr5zc= +k8s.io/apimachinery v0.27.11/go.mod h1:IHu2ovJ60RqxyPSLmTel7KDLdOCRbpOxwtUBmwBnT/E= +k8s.io/client-go v0.27.11 h1:SZChXsDaN6lB5IYywCpvQs/ZUa5vK2NHkpEwUhoK3fQ= +k8s.io/client-go v0.27.11/go.mod h1:Rg3Yeuk9sX87gpVunVn3AsvMkGZfXuutTDC/jigBNUo= +k8s.io/code-generator v0.27.11 h1:J3SExe92VqSKMUSIbItJrTtPITOSWS58AqIbLwycNY0= +k8s.io/code-generator v0.27.11/go.mod h1:iyFD2q65bX/xrlrGzXi2kZXiBTbTDiAzEty3jq6a0NA= +k8s.io/component-base v0.27.11 h1:oq1xukCfjOlF6Jpe7/1PMOXhzicW3HqaUi8aaBmvCEM= +k8s.io/component-base v0.27.11/go.mod h1:YHs4U6ETkkvGj7NR44ISaSMiDQrV7nPtUnIenALJicw= k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 h1:iu3o/SxaHVI7tKPtkGzD3M9IzrE21j+CUKH98NQJ8Ms= k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= -k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= -k8s.io/kubectl v0.26.9 h1:0Z8wEFQoXqWbWfT7G9si5EKVjYf5KG4hocSB7B8Jkbc= -k8s.io/kubectl v0.26.9/go.mod h1:Rl9W561iyUZtzwgstHcKChzUo2xMaGqb7Za6jUDMKP4= -k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= -k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -knative.dev/pkg v0.0.0-20230221145627-8efb3485adcf h1:TwvZFDpkyqpK2OCAwvNGV2Zjk14FzIh8X8Ci/du3jYI= -knative.dev/pkg v0.0.0-20230221145627-8efb3485adcf/go.mod h1:VO/fcEsq43seuONRQxZyftWHjpMabYzRHDtpSEQ/eoQ= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 h1:azYPdzztXxPSa8wb+hksEKayiz0o+PPisO/d+QhWnoo= +k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5/go.mod h1:kzo02I3kQ4BTtEfVLaPbjvCkX97YqGve33wzlb3fofQ= +k8s.io/kubectl v0.27.11 h1:rndS6LxY+qjTsEE/frCRlCL/TWeokWOgynSa3v5lbPM= +k8s.io/kubectl v0.27.11/go.mod h1:1eq/sCLAWOsPaHt/zojqEvVJECOmC68ol6eO4RyC4oc= +k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= +k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +knative.dev/pkg v0.0.0-20231011201526-df28feae6d34 h1:H+K37bEBZ2STSWMjCgrdilj38KKZGVxBbob22K99Y50= +knative.dev/pkg v0.0.0-20231011201526-df28feae6d34/go.mod h1:ZRgzFBFmdBsARm6+Pkr9WRG8bXys8rYq64ELfLG6+9w= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA= -sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/controller-runtime 
v0.15.3 h1:L+t5heIaI3zeejoIyyvLQs5vTVu/67IU2FfisVzFlBc= +sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/hack/install-controller-gen.sh b/hack/install-controller-gen.sh index 77c74602af..7a222dfa77 100755 --- a/hack/install-controller-gen.sh +++ b/hack/install-controller-gen.sh @@ -11,7 +11,7 @@ set -eu # controller-gen version -CONTROLLER_GEN_VERSION="${CONTROLLER_GEN_VERSION:-v0.11.4}" +CONTROLLER_GEN_VERSION="${CONTROLLER_GEN_VERSION:-v0.12.1}" if [ ! -f "${GOPATH}/bin/controller-gen" ]; then echo "# Installing controller-gen..." diff --git a/hack/install-kind.sh b/hack/install-kind.sh index ec2a0a904a..41961e0a81 100755 --- a/hack/install-kind.sh +++ b/hack/install-kind.sh @@ -13,7 +13,7 @@ set -eu DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd)" # kind version -KIND_VERSION="${KIND_VERSION:-v0.21.0}" +KIND_VERSION="${KIND_VERSION:-v0.22.0}" if ! hash kind > /dev/null 2>&1 ; then echo "# Installing KinD..." @@ -27,7 +27,7 @@ kind --version KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-kind}" # kind cluster version -KIND_CLUSTER_VERSION="${KIND_CLUSTER_VERSION:-v1.25.16}" +KIND_CLUSTER_VERSION="${KIND_CLUSTER_VERSION:-v1.27.11}" echo "# Creating a new Kubernetes cluster..." kind delete cluster --name="${KIND_CLUSTER_NAME}" diff --git a/hack/install-tekton.sh b/hack/install-tekton.sh index 7dc6f6ae43..823f45a6fc 100755 --- a/hack/install-tekton.sh +++ b/hack/install-tekton.sh @@ -9,7 +9,7 @@ set -eu -TEKTON_VERSION="${TEKTON_VERSION:-v0.47.4}" +TEKTON_VERSION="${TEKTON_VERSION:-v0.50.5}" TEKTON_HOST="github.com" TEKTON_HOST_PATH="tektoncd/pipeline/releases/download" diff --git a/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_build.go b/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_build.go index d1e9310e38..0a3d1968d3 100644 --- a/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_build.go +++ b/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_build.go @@ -12,7 +12,6 @@ import ( v1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -24,9 +23,9 @@ type FakeBuilds struct { ns string } -var buildsResource = schema.GroupVersionResource{Group: "shipwright.io", Version: "v1alpha1", Resource: "builds"} +var buildsResource = v1alpha1.SchemeGroupVersion.WithResource("builds") -var buildsKind = schema.GroupVersionKind{Group: "shipwright.io", Version: "v1alpha1", Kind: "Build"} +var buildsKind = v1alpha1.SchemeGroupVersion.WithKind("Build") // Get takes name of the build, and returns the corresponding build object, and an error if there is any. 
func (c *FakeBuilds) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Build, err error) { diff --git a/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_buildrun.go b/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_buildrun.go index 7534ef78c3..22fc2563a6 100644 --- a/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_buildrun.go +++ b/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_buildrun.go @@ -12,7 +12,6 @@ import ( v1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -24,9 +23,9 @@ type FakeBuildRuns struct { ns string } -var buildrunsResource = schema.GroupVersionResource{Group: "shipwright.io", Version: "v1alpha1", Resource: "buildruns"} +var buildrunsResource = v1alpha1.SchemeGroupVersion.WithResource("buildruns") -var buildrunsKind = schema.GroupVersionKind{Group: "shipwright.io", Version: "v1alpha1", Kind: "BuildRun"} +var buildrunsKind = v1alpha1.SchemeGroupVersion.WithKind("BuildRun") // Get takes name of the buildRun, and returns the corresponding buildRun object, and an error if there is any. func (c *FakeBuildRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BuildRun, err error) { diff --git a/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_buildstrategy.go b/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_buildstrategy.go index ce23d87519..89b9eb1930 100644 --- a/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_buildstrategy.go +++ b/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_buildstrategy.go @@ -12,7 +12,6 @@ import ( v1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -24,9 +23,9 @@ type FakeBuildStrategies struct { ns string } -var buildstrategiesResource = schema.GroupVersionResource{Group: "shipwright.io", Version: "v1alpha1", Resource: "buildstrategies"} +var buildstrategiesResource = v1alpha1.SchemeGroupVersion.WithResource("buildstrategies") -var buildstrategiesKind = schema.GroupVersionKind{Group: "shipwright.io", Version: "v1alpha1", Kind: "BuildStrategy"} +var buildstrategiesKind = v1alpha1.SchemeGroupVersion.WithKind("BuildStrategy") // Get takes name of the buildStrategy, and returns the corresponding buildStrategy object, and an error if there is any. 
func (c *FakeBuildStrategies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.BuildStrategy, err error) { diff --git a/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_clusterbuildstrategy.go b/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_clusterbuildstrategy.go index 99157c8448..5a5863dfc4 100644 --- a/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_clusterbuildstrategy.go +++ b/pkg/client/clientset/versioned/typed/build/v1alpha1/fake/fake_clusterbuildstrategy.go @@ -12,7 +12,6 @@ import ( v1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -23,9 +22,9 @@ type FakeClusterBuildStrategies struct { Fake *FakeShipwrightV1alpha1 } -var clusterbuildstrategiesResource = schema.GroupVersionResource{Group: "shipwright.io", Version: "v1alpha1", Resource: "clusterbuildstrategies"} +var clusterbuildstrategiesResource = v1alpha1.SchemeGroupVersion.WithResource("clusterbuildstrategies") -var clusterbuildstrategiesKind = schema.GroupVersionKind{Group: "shipwright.io", Version: "v1alpha1", Kind: "ClusterBuildStrategy"} +var clusterbuildstrategiesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterBuildStrategy") // Get takes name of the clusterBuildStrategy, and returns the corresponding clusterBuildStrategy object, and an error if there is any. func (c *FakeClusterBuildStrategies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterBuildStrategy, err error) { diff --git a/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_build.go b/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_build.go index f83b13146d..80f9a397bc 100644 --- a/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_build.go +++ b/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_build.go @@ -12,7 +12,6 @@ import ( v1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -24,9 +23,9 @@ type FakeBuilds struct { ns string } -var buildsResource = schema.GroupVersionResource{Group: "shipwright.io", Version: "v1beta1", Resource: "builds"} +var buildsResource = v1beta1.SchemeGroupVersion.WithResource("builds") -var buildsKind = schema.GroupVersionKind{Group: "shipwright.io", Version: "v1beta1", Kind: "Build"} +var buildsKind = v1beta1.SchemeGroupVersion.WithKind("Build") // Get takes name of the build, and returns the corresponding build object, and an error if there is any. 
func (c *FakeBuilds) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Build, err error) { diff --git a/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_buildrun.go b/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_buildrun.go index 4a06dce30c..9de52cfd46 100644 --- a/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_buildrun.go +++ b/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_buildrun.go @@ -12,7 +12,6 @@ import ( v1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -24,9 +23,9 @@ type FakeBuildRuns struct { ns string } -var buildrunsResource = schema.GroupVersionResource{Group: "shipwright.io", Version: "v1beta1", Resource: "buildruns"} +var buildrunsResource = v1beta1.SchemeGroupVersion.WithResource("buildruns") -var buildrunsKind = schema.GroupVersionKind{Group: "shipwright.io", Version: "v1beta1", Kind: "BuildRun"} +var buildrunsKind = v1beta1.SchemeGroupVersion.WithKind("BuildRun") // Get takes name of the buildRun, and returns the corresponding buildRun object, and an error if there is any. func (c *FakeBuildRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.BuildRun, err error) { diff --git a/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_buildstrategy.go b/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_buildstrategy.go index e6e5269bf1..057a289d86 100644 --- a/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_buildstrategy.go +++ b/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_buildstrategy.go @@ -12,7 +12,6 @@ import ( v1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -24,9 +23,9 @@ type FakeBuildStrategies struct { ns string } -var buildstrategiesResource = schema.GroupVersionResource{Group: "shipwright.io", Version: "v1beta1", Resource: "buildstrategies"} +var buildstrategiesResource = v1beta1.SchemeGroupVersion.WithResource("buildstrategies") -var buildstrategiesKind = schema.GroupVersionKind{Group: "shipwright.io", Version: "v1beta1", Kind: "BuildStrategy"} +var buildstrategiesKind = v1beta1.SchemeGroupVersion.WithKind("BuildStrategy") // Get takes name of the buildStrategy, and returns the corresponding buildStrategy object, and an error if there is any. 
func (c *FakeBuildStrategies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.BuildStrategy, err error) { diff --git a/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_clusterbuildstrategy.go b/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_clusterbuildstrategy.go index 0e484755cf..2932d51909 100644 --- a/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_clusterbuildstrategy.go +++ b/pkg/client/clientset/versioned/typed/build/v1beta1/fake/fake_clusterbuildstrategy.go @@ -12,7 +12,6 @@ import ( v1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -23,9 +22,9 @@ type FakeClusterBuildStrategies struct { Fake *FakeShipwrightV1beta1 } -var clusterbuildstrategiesResource = schema.GroupVersionResource{Group: "shipwright.io", Version: "v1beta1", Resource: "clusterbuildstrategies"} +var clusterbuildstrategiesResource = v1beta1.SchemeGroupVersion.WithResource("clusterbuildstrategies") -var clusterbuildstrategiesKind = schema.GroupVersionKind{Group: "shipwright.io", Version: "v1beta1", Kind: "ClusterBuildStrategy"} +var clusterbuildstrategiesKind = v1beta1.SchemeGroupVersion.WithKind("ClusterBuildStrategy") // Get takes name of the clusterBuildStrategy, and returns the corresponding clusterBuildStrategy object, and an error if there is any. func (c *FakeClusterBuildStrategies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterBuildStrategy, err error) { diff --git a/pkg/controller/fakes/client.go b/pkg/controller/fakes/client.go index 638ffc3dc4..86d0002874 100644 --- a/pkg/controller/fakes/client.go +++ b/pkg/controller/fakes/client.go @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -68,6 +69,32 @@ type FakeClient struct { getReturnsOnCall map[int]struct { result1 error } + GroupVersionKindForStub func(runtime.Object) (schema.GroupVersionKind, error) + groupVersionKindForMutex sync.RWMutex + groupVersionKindForArgsForCall []struct { + arg1 runtime.Object + } + groupVersionKindForReturns struct { + result1 schema.GroupVersionKind + result2 error + } + groupVersionKindForReturnsOnCall map[int]struct { + result1 schema.GroupVersionKind + result2 error + } + IsObjectNamespacedStub func(runtime.Object) (bool, error) + isObjectNamespacedMutex sync.RWMutex + isObjectNamespacedArgsForCall []struct { + arg1 runtime.Object + } + isObjectNamespacedReturns struct { + result1 bool + result2 error + } + isObjectNamespacedReturnsOnCall map[int]struct { + result1 bool + result2 error + } ListStub func(context.Context, client.ObjectList, ...client.ListOption) error listMutex sync.RWMutex listArgsForCall []struct { @@ -406,6 +433,134 @@ func (fake *FakeClient) GetReturnsOnCall(i int, result1 error) { }{result1} } +func (fake *FakeClient) GroupVersionKindFor(arg1 runtime.Object) (schema.GroupVersionKind, error) { + fake.groupVersionKindForMutex.Lock() + ret, specificReturn := fake.groupVersionKindForReturnsOnCall[len(fake.groupVersionKindForArgsForCall)] + fake.groupVersionKindForArgsForCall = append(fake.groupVersionKindForArgsForCall, struct { + arg1 runtime.Object + }{arg1}) + stub := 
fake.GroupVersionKindForStub + fakeReturns := fake.groupVersionKindForReturns + fake.recordInvocation("GroupVersionKindFor", []interface{}{arg1}) + fake.groupVersionKindForMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeClient) GroupVersionKindForCallCount() int { + fake.groupVersionKindForMutex.RLock() + defer fake.groupVersionKindForMutex.RUnlock() + return len(fake.groupVersionKindForArgsForCall) +} + +func (fake *FakeClient) GroupVersionKindForCalls(stub func(runtime.Object) (schema.GroupVersionKind, error)) { + fake.groupVersionKindForMutex.Lock() + defer fake.groupVersionKindForMutex.Unlock() + fake.GroupVersionKindForStub = stub +} + +func (fake *FakeClient) GroupVersionKindForArgsForCall(i int) runtime.Object { + fake.groupVersionKindForMutex.RLock() + defer fake.groupVersionKindForMutex.RUnlock() + argsForCall := fake.groupVersionKindForArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeClient) GroupVersionKindForReturns(result1 schema.GroupVersionKind, result2 error) { + fake.groupVersionKindForMutex.Lock() + defer fake.groupVersionKindForMutex.Unlock() + fake.GroupVersionKindForStub = nil + fake.groupVersionKindForReturns = struct { + result1 schema.GroupVersionKind + result2 error + }{result1, result2} +} + +func (fake *FakeClient) GroupVersionKindForReturnsOnCall(i int, result1 schema.GroupVersionKind, result2 error) { + fake.groupVersionKindForMutex.Lock() + defer fake.groupVersionKindForMutex.Unlock() + fake.GroupVersionKindForStub = nil + if fake.groupVersionKindForReturnsOnCall == nil { + fake.groupVersionKindForReturnsOnCall = make(map[int]struct { + result1 schema.GroupVersionKind + result2 error + }) + } + fake.groupVersionKindForReturnsOnCall[i] = struct { + result1 schema.GroupVersionKind + result2 error + }{result1, result2} +} + +func (fake *FakeClient) IsObjectNamespaced(arg1 runtime.Object) (bool, error) { + fake.isObjectNamespacedMutex.Lock() + ret, specificReturn := fake.isObjectNamespacedReturnsOnCall[len(fake.isObjectNamespacedArgsForCall)] + fake.isObjectNamespacedArgsForCall = append(fake.isObjectNamespacedArgsForCall, struct { + arg1 runtime.Object + }{arg1}) + stub := fake.IsObjectNamespacedStub + fakeReturns := fake.isObjectNamespacedReturns + fake.recordInvocation("IsObjectNamespaced", []interface{}{arg1}) + fake.isObjectNamespacedMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeClient) IsObjectNamespacedCallCount() int { + fake.isObjectNamespacedMutex.RLock() + defer fake.isObjectNamespacedMutex.RUnlock() + return len(fake.isObjectNamespacedArgsForCall) +} + +func (fake *FakeClient) IsObjectNamespacedCalls(stub func(runtime.Object) (bool, error)) { + fake.isObjectNamespacedMutex.Lock() + defer fake.isObjectNamespacedMutex.Unlock() + fake.IsObjectNamespacedStub = stub +} + +func (fake *FakeClient) IsObjectNamespacedArgsForCall(i int) runtime.Object { + fake.isObjectNamespacedMutex.RLock() + defer fake.isObjectNamespacedMutex.RUnlock() + argsForCall := fake.isObjectNamespacedArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeClient) IsObjectNamespacedReturns(result1 bool, result2 error) { + fake.isObjectNamespacedMutex.Lock() + defer fake.isObjectNamespacedMutex.Unlock() + fake.IsObjectNamespacedStub = nil + fake.isObjectNamespacedReturns = struct { + 
result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeClient) IsObjectNamespacedReturnsOnCall(i int, result1 bool, result2 error) { + fake.isObjectNamespacedMutex.Lock() + defer fake.isObjectNamespacedMutex.Unlock() + fake.IsObjectNamespacedStub = nil + if fake.isObjectNamespacedReturnsOnCall == nil { + fake.isObjectNamespacedReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.isObjectNamespacedReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + func (fake *FakeClient) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { fake.listMutex.Lock() ret, specificReturn := fake.listReturnsOnCall[len(fake.listArgsForCall)] @@ -827,6 +982,10 @@ func (fake *FakeClient) Invocations() map[string][][]interface{} { defer fake.deleteAllOfMutex.RUnlock() fake.getMutex.RLock() defer fake.getMutex.RUnlock() + fake.groupVersionKindForMutex.RLock() + defer fake.groupVersionKindForMutex.RUnlock() + fake.isObjectNamespacedMutex.RLock() + defer fake.isObjectNamespacedMutex.RUnlock() fake.listMutex.RLock() defer fake.listMutex.RUnlock() fake.patchMutex.RLock() diff --git a/pkg/controller/fakes/manager.go b/pkg/controller/fakes/manager.go index 7bbe2204ab..ad7a85328b 100644 --- a/pkg/controller/fakes/manager.go +++ b/pkg/controller/fakes/manager.go @@ -16,7 +16,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -120,15 +120,15 @@ type FakeManager struct { getConfigReturnsOnCall map[int]struct { result1 *rest.Config } - GetControllerOptionsStub func() v1alpha1.ControllerConfigurationSpec + GetControllerOptionsStub func() config.Controller getControllerOptionsMutex sync.RWMutex getControllerOptionsArgsForCall []struct { } getControllerOptionsReturns struct { - result1 v1alpha1.ControllerConfigurationSpec + result1 config.Controller } getControllerOptionsReturnsOnCall map[int]struct { - result1 v1alpha1.ControllerConfigurationSpec + result1 config.Controller } GetEventRecorderForStub func(string) record.EventRecorder getEventRecorderForMutex sync.RWMutex @@ -151,6 +151,16 @@ type FakeManager struct { getFieldIndexerReturnsOnCall map[int]struct { result1 client.FieldIndexer } + GetHTTPClientStub func() *http.Client + getHTTPClientMutex sync.RWMutex + getHTTPClientArgsForCall []struct { + } + getHTTPClientReturns struct { + result1 *http.Client + } + getHTTPClientReturnsOnCall map[int]struct { + result1 *http.Client + } GetLoggerStub func() logr.Logger getLoggerMutex sync.RWMutex getLoggerArgsForCall []struct { @@ -181,26 +191,15 @@ type FakeManager struct { getSchemeReturnsOnCall map[int]struct { result1 *runtime.Scheme } - GetWebhookServerStub func() *webhook.Server + GetWebhookServerStub func() webhook.Server getWebhookServerMutex sync.RWMutex getWebhookServerArgsForCall []struct { } getWebhookServerReturns struct { - result1 *webhook.Server + result1 webhook.Server } getWebhookServerReturnsOnCall map[int]struct { - result1 *webhook.Server - } - SetFieldsStub func(interface{}) error - setFieldsMutex sync.RWMutex - setFieldsArgsForCall []struct { - arg1 interface{} - } - setFieldsReturns struct { - result1 error - } - setFieldsReturnsOnCall map[int]struct { - result1 error + result1 
webhook.Server } StartStub func(context.Context) error startMutex sync.RWMutex @@ -729,7 +728,7 @@ func (fake *FakeManager) GetConfigReturnsOnCall(i int, result1 *rest.Config) { }{result1} } -func (fake *FakeManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec { +func (fake *FakeManager) GetControllerOptions() config.Controller { fake.getControllerOptionsMutex.Lock() ret, specificReturn := fake.getControllerOptionsReturnsOnCall[len(fake.getControllerOptionsArgsForCall)] fake.getControllerOptionsArgsForCall = append(fake.getControllerOptionsArgsForCall, struct { @@ -753,32 +752,32 @@ func (fake *FakeManager) GetControllerOptionsCallCount() int { return len(fake.getControllerOptionsArgsForCall) } -func (fake *FakeManager) GetControllerOptionsCalls(stub func() v1alpha1.ControllerConfigurationSpec) { +func (fake *FakeManager) GetControllerOptionsCalls(stub func() config.Controller) { fake.getControllerOptionsMutex.Lock() defer fake.getControllerOptionsMutex.Unlock() fake.GetControllerOptionsStub = stub } -func (fake *FakeManager) GetControllerOptionsReturns(result1 v1alpha1.ControllerConfigurationSpec) { +func (fake *FakeManager) GetControllerOptionsReturns(result1 config.Controller) { fake.getControllerOptionsMutex.Lock() defer fake.getControllerOptionsMutex.Unlock() fake.GetControllerOptionsStub = nil fake.getControllerOptionsReturns = struct { - result1 v1alpha1.ControllerConfigurationSpec + result1 config.Controller }{result1} } -func (fake *FakeManager) GetControllerOptionsReturnsOnCall(i int, result1 v1alpha1.ControllerConfigurationSpec) { +func (fake *FakeManager) GetControllerOptionsReturnsOnCall(i int, result1 config.Controller) { fake.getControllerOptionsMutex.Lock() defer fake.getControllerOptionsMutex.Unlock() fake.GetControllerOptionsStub = nil if fake.getControllerOptionsReturnsOnCall == nil { fake.getControllerOptionsReturnsOnCall = make(map[int]struct { - result1 v1alpha1.ControllerConfigurationSpec + result1 config.Controller }) } fake.getControllerOptionsReturnsOnCall[i] = struct { - result1 v1alpha1.ControllerConfigurationSpec + result1 config.Controller }{result1} } @@ -896,6 +895,59 @@ func (fake *FakeManager) GetFieldIndexerReturnsOnCall(i int, result1 client.Fiel }{result1} } +func (fake *FakeManager) GetHTTPClient() *http.Client { + fake.getHTTPClientMutex.Lock() + ret, specificReturn := fake.getHTTPClientReturnsOnCall[len(fake.getHTTPClientArgsForCall)] + fake.getHTTPClientArgsForCall = append(fake.getHTTPClientArgsForCall, struct { + }{}) + stub := fake.GetHTTPClientStub + fakeReturns := fake.getHTTPClientReturns + fake.recordInvocation("GetHTTPClient", []interface{}{}) + fake.getHTTPClientMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeManager) GetHTTPClientCallCount() int { + fake.getHTTPClientMutex.RLock() + defer fake.getHTTPClientMutex.RUnlock() + return len(fake.getHTTPClientArgsForCall) +} + +func (fake *FakeManager) GetHTTPClientCalls(stub func() *http.Client) { + fake.getHTTPClientMutex.Lock() + defer fake.getHTTPClientMutex.Unlock() + fake.GetHTTPClientStub = stub +} + +func (fake *FakeManager) GetHTTPClientReturns(result1 *http.Client) { + fake.getHTTPClientMutex.Lock() + defer fake.getHTTPClientMutex.Unlock() + fake.GetHTTPClientStub = nil + fake.getHTTPClientReturns = struct { + result1 *http.Client + }{result1} +} + +func (fake *FakeManager) GetHTTPClientReturnsOnCall(i int, result1 *http.Client) { + fake.getHTTPClientMutex.Lock() + 
defer fake.getHTTPClientMutex.Unlock() + fake.GetHTTPClientStub = nil + if fake.getHTTPClientReturnsOnCall == nil { + fake.getHTTPClientReturnsOnCall = make(map[int]struct { + result1 *http.Client + }) + } + fake.getHTTPClientReturnsOnCall[i] = struct { + result1 *http.Client + }{result1} +} + func (fake *FakeManager) GetLogger() logr.Logger { fake.getLoggerMutex.Lock() ret, specificReturn := fake.getLoggerReturnsOnCall[len(fake.getLoggerArgsForCall)] @@ -1055,7 +1107,7 @@ func (fake *FakeManager) GetSchemeReturnsOnCall(i int, result1 *runtime.Scheme) }{result1} } -func (fake *FakeManager) GetWebhookServer() *webhook.Server { +func (fake *FakeManager) GetWebhookServer() webhook.Server { fake.getWebhookServerMutex.Lock() ret, specificReturn := fake.getWebhookServerReturnsOnCall[len(fake.getWebhookServerArgsForCall)] fake.getWebhookServerArgsForCall = append(fake.getWebhookServerArgsForCall, struct { @@ -1079,93 +1131,32 @@ func (fake *FakeManager) GetWebhookServerCallCount() int { return len(fake.getWebhookServerArgsForCall) } -func (fake *FakeManager) GetWebhookServerCalls(stub func() *webhook.Server) { +func (fake *FakeManager) GetWebhookServerCalls(stub func() webhook.Server) { fake.getWebhookServerMutex.Lock() defer fake.getWebhookServerMutex.Unlock() fake.GetWebhookServerStub = stub } -func (fake *FakeManager) GetWebhookServerReturns(result1 *webhook.Server) { +func (fake *FakeManager) GetWebhookServerReturns(result1 webhook.Server) { fake.getWebhookServerMutex.Lock() defer fake.getWebhookServerMutex.Unlock() fake.GetWebhookServerStub = nil fake.getWebhookServerReturns = struct { - result1 *webhook.Server + result1 webhook.Server }{result1} } -func (fake *FakeManager) GetWebhookServerReturnsOnCall(i int, result1 *webhook.Server) { +func (fake *FakeManager) GetWebhookServerReturnsOnCall(i int, result1 webhook.Server) { fake.getWebhookServerMutex.Lock() defer fake.getWebhookServerMutex.Unlock() fake.GetWebhookServerStub = nil if fake.getWebhookServerReturnsOnCall == nil { fake.getWebhookServerReturnsOnCall = make(map[int]struct { - result1 *webhook.Server + result1 webhook.Server }) } fake.getWebhookServerReturnsOnCall[i] = struct { - result1 *webhook.Server - }{result1} -} - -func (fake *FakeManager) SetFields(arg1 interface{}) error { - fake.setFieldsMutex.Lock() - ret, specificReturn := fake.setFieldsReturnsOnCall[len(fake.setFieldsArgsForCall)] - fake.setFieldsArgsForCall = append(fake.setFieldsArgsForCall, struct { - arg1 interface{} - }{arg1}) - stub := fake.SetFieldsStub - fakeReturns := fake.setFieldsReturns - fake.recordInvocation("SetFields", []interface{}{arg1}) - fake.setFieldsMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeManager) SetFieldsCallCount() int { - fake.setFieldsMutex.RLock() - defer fake.setFieldsMutex.RUnlock() - return len(fake.setFieldsArgsForCall) -} - -func (fake *FakeManager) SetFieldsCalls(stub func(interface{}) error) { - fake.setFieldsMutex.Lock() - defer fake.setFieldsMutex.Unlock() - fake.SetFieldsStub = stub -} - -func (fake *FakeManager) SetFieldsArgsForCall(i int) interface{} { - fake.setFieldsMutex.RLock() - defer fake.setFieldsMutex.RUnlock() - argsForCall := fake.setFieldsArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeManager) SetFieldsReturns(result1 error) { - fake.setFieldsMutex.Lock() - defer fake.setFieldsMutex.Unlock() - fake.SetFieldsStub = nil - fake.setFieldsReturns = struct { - result1 error - }{result1} -} - 
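The regenerated FakeManager above follows the updated controller-runtime manager.Manager interface: SetFields is gone, GetHTTPClient is new, GetControllerOptions now returns config.Controller, and GetWebhookServer returns the webhook.Server interface instead of a pointer. A small test-style sketch of stubbing the new surface, assuming the counterfeiter fakes package at github.com/shipwright-io/build/pkg/controller/fakes:

```go
package controller_test

import (
	"net/http"

	"github.com/shipwright-io/build/pkg/controller/fakes"
	"sigs.k8s.io/controller-runtime/pkg/config"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func newStubbedManager() *fakes.FakeManager {
	fakeMgr := &fakes.FakeManager{}

	// New in the updated manager interface: a shared HTTP client.
	fakeMgr.GetHTTPClientReturns(&http.Client{})

	// config.Controller replaces the removed v1alpha1.ControllerConfigurationSpec.
	fakeMgr.GetControllerOptionsReturns(config.Controller{})

	// webhook.Server is now an interface, so a nil value (or any implementation)
	// can be stubbed without taking an address.
	var server webhook.Server
	fakeMgr.GetWebhookServerReturns(server)

	return fakeMgr
}
```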
-func (fake *FakeManager) SetFieldsReturnsOnCall(i int, result1 error) { - fake.setFieldsMutex.Lock() - defer fake.setFieldsMutex.Unlock() - fake.SetFieldsStub = nil - if fake.setFieldsReturnsOnCall == nil { - fake.setFieldsReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.setFieldsReturnsOnCall[i] = struct { - result1 error + result1 webhook.Server }{result1} } @@ -1257,6 +1248,8 @@ func (fake *FakeManager) Invocations() map[string][][]interface{} { defer fake.getEventRecorderForMutex.RUnlock() fake.getFieldIndexerMutex.RLock() defer fake.getFieldIndexerMutex.RUnlock() + fake.getHTTPClientMutex.RLock() + defer fake.getHTTPClientMutex.RUnlock() fake.getLoggerMutex.RLock() defer fake.getLoggerMutex.RUnlock() fake.getRESTMapperMutex.RLock() @@ -1265,8 +1258,6 @@ func (fake *FakeManager) Invocations() map[string][][]interface{} { defer fake.getSchemeMutex.RUnlock() fake.getWebhookServerMutex.RLock() defer fake.getWebhookServerMutex.RUnlock() - fake.setFieldsMutex.RLock() - defer fake.setFieldsMutex.RUnlock() fake.startMutex.RLock() defer fake.startMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} diff --git a/pkg/reconciler/build/controller.go b/pkg/reconciler/build/controller.go index 64c7aa11b7..bc12a39759 100644 --- a/pkg/reconciler/build/controller.go +++ b/pkg/reconciler/build/controller.go @@ -119,7 +119,7 @@ func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxCo } // Watch for changes to primary resource Build - if err = c.Watch(&source.Kind{Type: &build.Build{}}, &handler.EnqueueRequestForObject{}, pred); err != nil { + if err = c.Watch(source.Kind(mgr.GetCache(), &build.Build{}), &handler.EnqueueRequestForObject{}, pred); err != nil { return err } @@ -157,7 +157,7 @@ func add(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, maxCo }, } - return c.Watch(&source.Kind{Type: &corev1.Secret{}}, handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { + return c.Watch(source.Kind(mgr.GetCache(), &corev1.Secret{}), handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { secret := o.(*corev1.Secret) buildList := &build.BuildList{} diff --git a/pkg/reconciler/buildlimitcleanup/controller.go b/pkg/reconciler/buildlimitcleanup/controller.go index 270696099c..eb23e139a3 100644 --- a/pkg/reconciler/buildlimitcleanup/controller.go +++ b/pkg/reconciler/buildlimitcleanup/controller.go @@ -113,12 +113,12 @@ func add(mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles in } // Watch for changes to primary resource Build - if err = c.Watch(&source.Kind{Type: &buildv1beta1.Build{}}, &handler.EnqueueRequestForObject{}, pred); err != nil { + if err = c.Watch(source.Kind(mgr.GetCache(), &buildv1beta1.Build{}), &handler.EnqueueRequestForObject{}, pred); err != nil { return err } // Watch for changes to resource BuildRun - return c.Watch(&source.Kind{Type: &buildv1beta1.BuildRun{}}, handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { + return c.Watch(source.Kind(mgr.GetCache(), &buildv1beta1.BuildRun{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, o client.Object) []reconcile.Request { buildRun := o.(*buildv1beta1.BuildRun) return []reconcile.Request{ diff --git a/pkg/reconciler/buildrun/controller.go b/pkg/reconciler/buildrun/controller.go index 112c5afa41..f6c0974868 100644 --- a/pkg/reconciler/buildrun/controller.go +++ b/pkg/reconciler/buildrun/controller.go @@ -107,13 +107,13 @@ func add(mgr 
manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles in } // Watch for changes to primary resource BuildRun - if err = c.Watch(&source.Kind{Type: &buildv1beta1.BuildRun{}}, &handler.EnqueueRequestForObject{}, predBuildRun); err != nil { + if err = c.Watch(source.Kind(mgr.GetCache(), &buildv1beta1.BuildRun{}), &handler.EnqueueRequestForObject{}, predBuildRun); err != nil { return err } // enqueue Reconciles requests only for events where a TaskRun already exists and that is related // to a BuildRun - return c.Watch(&source.Kind{Type: &pipelineapi.TaskRun{}}, handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { + return c.Watch(source.Kind(mgr.GetCache(), &pipelineapi.TaskRun{}), handler.EnqueueRequestsFromMapFunc(func(_ context.Context, o client.Object) []reconcile.Request { taskRun := o.(*pipelineapi.TaskRun) // check if TaskRun is related to BuildRun diff --git a/pkg/reconciler/buildrunttlcleanup/controller.go b/pkg/reconciler/buildrunttlcleanup/controller.go index 4cef1bc7d9..d21f0857e9 100644 --- a/pkg/reconciler/buildrunttlcleanup/controller.go +++ b/pkg/reconciler/buildrunttlcleanup/controller.go @@ -137,5 +137,5 @@ func add(mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles in }, } // Watch for changes to primary resource BuildRun - return c.Watch(&source.Kind{Type: &buildv1beta1.BuildRun{}}, &handler.EnqueueRequestForObject{}, predBuildRun) + return c.Watch(source.Kind(mgr.GetCache(), &buildv1beta1.BuildRun{}), &handler.EnqueueRequestForObject{}, predBuildRun) } diff --git a/pkg/reconciler/buildstrategy/controller.go b/pkg/reconciler/buildstrategy/controller.go index bc21822861..fc7d31363c 100644 --- a/pkg/reconciler/buildstrategy/controller.go +++ b/pkg/reconciler/buildstrategy/controller.go @@ -40,5 +40,5 @@ func add(mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles in } // Watch for changes to primary resource BuildStrategy - return c.Watch(&source.Kind{Type: &buildv1beta1.BuildStrategy{}}, &handler.EnqueueRequestForObject{}) + return c.Watch(source.Kind(mgr.GetCache(), &buildv1beta1.BuildStrategy{}), &handler.EnqueueRequestForObject{}) } diff --git a/pkg/reconciler/clusterbuildstrategy/controller.go b/pkg/reconciler/clusterbuildstrategy/controller.go index 51fead9dd0..789bd26778 100644 --- a/pkg/reconciler/clusterbuildstrategy/controller.go +++ b/pkg/reconciler/clusterbuildstrategy/controller.go @@ -40,5 +40,5 @@ func add(mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles in } // Watch for changes to primary resource ClusterBuildStrategy - return c.Watch(&source.Kind{Type: &buildv1beta1.ClusterBuildStrategy{}}, &handler.EnqueueRequestForObject{}) + return c.Watch(source.Kind(mgr.GetCache(), &buildv1beta1.ClusterBuildStrategy{}), &handler.EnqueueRequestForObject{}) } diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 74a378157a..352018e703 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,10 +1,26 @@ # Change history of go-restful -## [v3.9.0] - 20221-07-21 +## [v3.10.2] - 2023-03-09 + +- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 + see comment in Readme how to customize this behaviour. 
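Stepping back to the reconciler changes earlier in this diff (pkg/reconciler/*/controller.go): watches now build their source with source.Kind(mgr.GetCache(), obj), and map functions take a context as their first argument. A condensed sketch of that wiring under the newer controller-runtime API; the enqueue target here is simplified to the BuildRun itself, which is hypothetical, since the real controllers map events back to their owning resources:

```go
package example

import (
	"context"

	buildv1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// watchBuildRuns mirrors the updated pattern: the source carries the cache,
// and the map function receives a context.
func watchBuildRuns(c controller.Controller, mgr manager.Manager) error {
	return c.Watch(
		source.Kind(mgr.GetCache(), &buildv1beta1.BuildRun{}),
		handler.EnqueueRequestsFromMapFunc(func(_ context.Context, o client.Object) []reconcile.Request {
			buildRun := o.(*buildv1beta1.BuildRun)
			// Simplified mapping for illustration: enqueue the BuildRun itself.
			return []reconcile.Request{{
				NamespacedName: types.NamespacedName{Namespace: buildRun.Namespace, Name: buildRun.Name},
			}}
		}),
	)
}
```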
+ +## [v3.10.1] - 2022-11-19 + +- fix broken 3.10.0 by using path package for joining paths + +## [v3.10.0] - 2022-10-11 - BROKEN + +- changed tokenizer to match std route match behavior; do not trimright the path (#511) +- Add MIME_ZIP (#512) +- Add MIME_ZIP and HEADER_ContentDisposition (#513) +- Changed how to get query parameter issue #510 + +## [v3.9.0] - 2022-07-21 - add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci) -## [v3.8.0] - 20221-06-06 +## [v3.8.0] - 2022-06-06 - use exact matching of allowed domain entries, issue #489 (#493) - this changes fixes [security] Authorization Bypass Through User-Controlled Key diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 0625359dc4..85da90128e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -96,6 +96,10 @@ There are several hooks to customize the behavior of the go-restful package. - Compression - Encoders for other serializers - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` +- Use the variable `MergePathStrategy` to change the behaviour of composing the Route path given a root path and a local route path + - versions >= 3.10.1 has set the value to `PathJoinStrategy` that fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services not to work correctly anymore. + - versions <= 3.9 had the behaviour that can be restored in newer versions by setting the value to `TrimSlashStrategy`. + - you can set value to a custom implementation (must implement MergePathStrategyFunc) ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go index 203439c5e5..2328bde6c7 100644 --- a/vendor/github.com/emicklei/go-restful/v3/constants.go +++ b/vendor/github.com/emicklei/go-restful/v3/constants.go @@ -7,12 +7,14 @@ package restful const ( MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces() MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default HEADER_Allow = "Allow" HEADER_Accept = "Accept" HEADER_Origin = "Origin" HEADER_ContentType = "Content-Type" + HEADER_ContentDisposition = "Content-Disposition" HEADER_LastModified = "Last-Modified" HEADER_AcceptEncoding = "Accept-Encoding" HEADER_ContentEncoding = "Content-Encoding" diff --git a/vendor/github.com/emicklei/go-restful/v3/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go index 5725a07595..0020095e86 100644 --- a/vendor/github.com/emicklei/go-restful/v3/request.go +++ b/vendor/github.com/emicklei/go-restful/v3/request.go @@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request { // a "Unable to unmarshal content of type:" response is returned. 
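The go-restful README note above describes how to revert the new path-joining behaviour. A minimal sketch, assuming the v3.10.x module as vendored here:

```go
package main

import restful "github.com/emicklei/go-restful/v3"

func main() {
	// Restore the <= v3.9 concatenation of root and route paths instead of
	// the stricter path.Join-based default (PathJoinStrategy).
	restful.MergePathStrategy = restful.TrimSlashStrategy

	// ... register WebServices and start the container as usual ...
}
```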
// Valid values are restful.MIME_JSON and restful.MIME_XML // Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) +// +// restful.DefaultRequestContentType(restful.MIME_JSON) func DefaultRequestContentType(mime string) { defaultRequestContentType = mime } @@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string { // QueryParameter returns the (first) Query parameter value by its name func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) + return r.Request.URL.Query().Get(name) } // QueryParameters returns the all the query parameters values by name diff --git a/vendor/github.com/emicklei/go-restful/v3/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go index 8f0b56aa2d..a41a92cc2c 100644 --- a/vendor/github.com/emicklei/go-restful/v3/response.go +++ b/vendor/github.com/emicklei/go-restful/v3/response.go @@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) { if DefaultResponseMimeType == MIME_XML { return entityAccessRegistry.accessorAt(MIME_XML) } + if DefaultResponseMimeType == MIME_ZIP { + return entityAccessRegistry.accessorAt(MIME_ZIP) + } // Fallback to whatever the route says it can produce. // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html for _, each := range r.routeProduces { diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 193f4a6b01..ea05b3da88 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -164,7 +164,7 @@ func tokenizePath(path string) []string { if "/" == path { return nil } - return strings.Split(strings.Trim(path, "/"), "/") + return strings.Split(strings.TrimLeft(path, "/"), "/") } // for debugging @@ -176,3 +176,5 @@ func (r *Route) String() string { func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } + +var TrimRightSlashEnabled = false diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 23641b6dd5..827f471de0 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -7,6 +7,7 @@ package restful import ( "fmt" "os" + "path" "reflect" "runtime" "strings" @@ -46,11 +47,12 @@ type RouteBuilder struct { // Do evaluates each argument with the RouteBuilder itself. // This allows you to follow DRY principles without breaking the fluent programming style. 
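The QueryParameter change above (FormValue replaced by URL.Query().Get) also changes precedence for form-encoded bodies: net/http's FormValue lets a POST body override the query string, while the new lookup reads the query string only. A small sketch using go-restful's NewRequest:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	httpReq, _ := http.NewRequest(http.MethodPost, "/items?limit=10", strings.NewReader("limit=99"))
	httpReq.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	req := restful.NewRequest(httpReq)

	// With URL.Query().Get the form body is ignored; the previous
	// FormValue-based lookup would have returned "99" here.
	fmt.Println(req.QueryParameter("limit")) // "10"
}
```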
// Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) // -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } +// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) +// +// func Returns500(b *RouteBuilder) { +// b.Returns(500, "Internal Server Error", restful.ServiceError{}) +// } func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { for _, each := range oneArgBlocks { each(b) @@ -351,8 +353,28 @@ func (b *RouteBuilder) Build() Route { return route } -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") +type MergePathStrategyFunc func(rootPath, routePath string) string + +var ( + // behavior >= 3.10 + PathJoinStrategy = func(rootPath, routePath string) string { + return path.Join(rootPath, routePath) + } + + // behavior <= 3.9 + TrimSlashStrategy = func(rootPath, routePath string) string { + return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } + + // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices. + // The value is set to PathJoinStrategy + // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227] + MergePathStrategy = PathJoinStrategy +) + +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { + return MergePathStrategy(rootPath, routePath) } var anonymousFuncCount int32 diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go index fb376fce2d..f0610cf1e5 100644 --- a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -26,11 +26,16 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`) // - FlagLowercaseHost // - FlagRemoveDefaultPort // - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// +// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. func NormalizeURL(u *url.URL) { lowercaseScheme(u) lowercaseHost(u) removeDefaultPort(u) removeDuplicateSlashes(u) + + u.RawPath = "" + u.RawFragment = "" } func lowercaseScheme(u *url.URL) { diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index e628920460..581cf7cdfa 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -59,10 +59,4 @@ const ( // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. AnnotationBaseImageName = "org.opencontainers.image.base.name" - - // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. - AnnotationArtifactCreated = "org.opencontainers.artifact.created" - - // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. 
- AnnotationArtifactDescription = "org.opencontainers.artifact.description" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go index 9654aa5af6..3004e9a40d 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -52,7 +52,7 @@ type Descriptor struct { // Platform describes the platform which the image in the manifest runs on. type Platform struct { // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64`. + // `amd64` or `ppc64le`. Architecture string `json:"architecture"` // OS specifies the operating system, for example `linux` or `windows`. @@ -70,3 +70,11 @@ type Platform struct { // example `v7` to specify ARMv7 when architecture is `arm`. Variant string `json:"variant,omitempty"` } + +// DescriptorEmptyJSON is the descriptor of a blob with content of `{}`. +var DescriptorEmptyJSON = Descriptor{ + MediaType: MediaTypeEmptyJSON, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go index ed4a56e59e..e2bed9d4e4 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -24,9 +24,15 @@ type Index struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + // Manifests references platform specific manifests. Manifests []Descriptor `json:"manifests"` + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + // Annotations contains arbitrary metadata for the image index. Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 4ce7b54ccd..26fec52a6b 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -39,11 +39,3 @@ type Manifest struct { // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` } - -// ScratchDescriptor is the descriptor of a blob with content of `{}`. -var ScratchDescriptor = Descriptor{ - MediaType: MediaTypeScratch, - Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, - Size: 2, - Data: []byte(`{}`), -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 5dd31255eb..892ba3de9d 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -70,6 +70,6 @@ const ( // MediaTypeImageConfig specifies the media type for the image configuration. 
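The image-spec rc.4 bump above adds ArtifactType and Subject to the index type and replaces the scratch descriptor with DescriptorEmptyJSON. A sketch of describing an artifact with these fields; the artifact type and digest values below are placeholders:

```go
package main

import (
	"fmt"

	specs "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	index := v1.Index{
		Versioned:    specs.Versioned{SchemaVersion: 2},
		MediaType:    v1.MediaTypeImageIndex,
		ArtifactType: "application/vnd.example.sbom.v1+json", // placeholder artifact type
		Manifests:    []v1.Descriptor{},                      // platform manifests would go here
		// Subject links this index to another manifest, e.g. the image it describes.
		Subject: &v1.Descriptor{
			MediaType: v1.MediaTypeImageManifest,
			Digest:    "sha256:0000000000000000000000000000000000000000000000000000000000000000", // placeholder
			Size:      0,
		},
	}

	// The canonical `{}` blob descriptor, formerly ScratchDescriptor.
	fmt.Println(v1.DescriptorEmptyJSON.MediaType, index.MediaType)
}
```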
MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - // MediaTypeScratch specifies the media type for an unused blob containing the value `{}` - MediaTypeScratch = "application/vnd.oci.scratch.v1+json" + // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value `{}` + MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 3d4119b441..e3b7ac03a9 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc.3" + VersionDev = "-rc.4" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go deleted file mode 100644 index ed1e1b2e9b..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/contexts.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "context" -) - -// isSubstituted is used for associating the parameter substitution inside the context.Context. -type isSubstituted struct{} - -// validatePropagatedVariables is used for deciding whether to validate or skip parameters and workspaces inside the contect.Context. -type validatePropagatedVariables string - -// WithinSubstituted is used to note that it is calling within -// the context of a substitute variable operation. -func WithinSubstituted(ctx context.Context) context.Context { - return context.WithValue(ctx, isSubstituted{}, true) -} - -// IsSubstituted indicates that the variables have been substituted. -func IsSubstituted(ctx context.Context) bool { - return ctx.Value(isSubstituted{}) != nil -} - -// SkipValidationDueToPropagatedParametersAndWorkspaces sets the context to skip validation of parameters when embedded vs referenced to true or false. -func SkipValidationDueToPropagatedParametersAndWorkspaces(ctx context.Context, skip bool) context.Context { - return context.WithValue(ctx, validatePropagatedVariables("ValidatePropagatedParameterVariablesAndWorkspaces"), !skip) -} - -// ValidateParameterVariablesAndWorkspaces indicates if validation of paramater variables and workspaces should be conducted. 
-func ValidateParameterVariablesAndWorkspaces(ctx context.Context) bool { - return ctx.Value(validatePropagatedVariables("ValidatePropagatedParameterVariablesAndWorkspaces")) == true -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go index 968dae25e8..b17f891b9c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go @@ -59,6 +59,9 @@ const ( defaultResolverTypeKey = "default-resolver-type" ) +// DefaultConfig holds all the default configurations for the config. +var DefaultConfig, _ = NewDefaultsFromMap(map[string]string{}) + // Defaults holds the default configurations // +k8s:deepcopy-gen=true type Defaults struct { @@ -67,7 +70,7 @@ type Defaults struct { DefaultManagedByLabelValue string DefaultPodTemplate *pod.Template DefaultAAPodTemplate *pod.AffinityAssistantTemplate - DefaultCloudEventsSink string + DefaultCloudEventsSink string // Deprecated. Use the events package instead DefaultTaskRunWorkspaceBinding string DefaultMaxMatrixCombinationsCount int DefaultForbiddenEnv []string diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/events.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/events.go new file mode 100644 index 0000000000..fca87c4aa7 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/events.go @@ -0,0 +1,185 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "os" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +const ( + // FormatTektonV1 represents the "v1" events in Tekton custom format + FormatTektonV1 EventFormat = "tektonv1" + + // DefaultSink is the default value for "sink" + DefaultSink = "" + + formatsKey = "formats" + sinkKey = "sink" +) + +var ( + // TODO(afrittoli): only one valid format for now, more to come + // See TEP-0137 https://github.com/tektoncd/community/pull/1028 + validFormats = EventFormats{FormatTektonV1: struct{}{}} + + // DefaultFormat is the default value for "formats" + DefaultFormats = EventFormats{FormatTektonV1: struct{}{}} + + // DefaultConfig holds all the default configurations for the config. 
+ DefaultEvents, _ = NewEventsFromMap(map[string]string{}) +) + +// Events holds the events configurations +// +k8s:deepcopy-gen=true +type Events struct { + Sink string + Formats EventFormats +} + +// EventFormat is a single event format +type EventFormat string + +// EventFormats is a set of event formats +type EventFormats map[EventFormat]struct{} + +// String is a string representation of an EventFormat +func (ef EventFormat) String() string { + return string(ef) +} + +// IsValid returns true is the EventFormat one of the valid ones +func (ef EventFormat) IsValid() bool { + _, ok := validFormats[ef] + return ok +} + +// String is a string representation of an EventFormats +func (efs EventFormats) String() string { + // Make an array of map keys + keys := make([]string, len(efs)) + + i := 0 + for k := range efs { + keys[i] = k.String() + i++ + } + // Sorting helps with testing + sort.Strings(keys) + + // Build a comma separated list + return strings.Join(keys, ",") +} + +// Equals defines identity between EventFormats +func (efs EventFormats) Equals(other EventFormats) bool { + if len(efs) != len(other) { + return false + } + for key := range efs { + if _, ok := other[key]; !ok { + return false + } + } + return true +} + +// ParseEventFormats converts a comma separated list into a EventFormats set +func ParseEventFormats(formats string) (EventFormats, error) { + // An empty string is not a valid configuration + if formats == "" { + return EventFormats{}, fmt.Errorf("formats cannot be empty") + } + stringFormats := strings.Split(formats, ",") + var eventFormats EventFormats = make(map[EventFormat]struct{}, len(stringFormats)) + for _, format := range stringFormats { + if !EventFormat(format).IsValid() { + return EventFormats{}, fmt.Errorf("invalid format: %s", format) + } + // If already in the map (duplicate), fail + if _, ok := eventFormats[EventFormat(format)]; ok { + return EventFormats{}, fmt.Errorf("duplicate format: %s", format) + } + eventFormats[EventFormat(format)] = struct{}{} + } + return eventFormats, nil +} + +// GetEventsConfigName returns the name of the configmap containing all +// feature flags. 
+func GetEventsConfigName() string { + if e := os.Getenv("CONFIG_EVENTS_NAME"); e != "" { + return e + } + return "config-events" +} + +// NewEventsFromMap returns a Config given a map corresponding to a ConfigMap +func NewEventsFromMap(cfgMap map[string]string) (*Events, error) { + // for any string field with no extra validation + setField := func(key string, defaultValue string, field *string) { + if cfg, ok := cfgMap[key]; ok { + *field = cfg + } else { + *field = defaultValue + } + } + + events := Events{} + err := setFormats(cfgMap, DefaultFormats, &events.Formats) + if err != nil { + return nil, err + } + setField(sinkKey, DefaultSink, &events.Sink) + return &events, nil +} + +func setFormats(cfgMap map[string]string, defaultValue EventFormats, field *EventFormats) error { + value := defaultValue + if cfg, ok := cfgMap[formatsKey]; ok { + v, err := ParseEventFormats(cfg) + if err != nil { + return err + } + value = v + } + *field = value + return nil +} + +// NewEventsFromConfigMap returns a Config for the given configmap +func NewEventsFromConfigMap(config *corev1.ConfigMap) (*Events, error) { + return NewEventsFromMap(config.Data) +} + +// Equals returns true if two Configs are identical +func (cfg *Events) Equals(other *Events) bool { + if cfg == nil && other == nil { + return true + } + + if cfg == nil || other == nil { + return false + } + + return other.Sink == cfg.Sink && + other.Formats.Equals(cfg.Formats) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go index 699a65516e..8e0054daf4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go @@ -42,6 +42,14 @@ const ( // IgnoreNoMatchPolicy is the value used for "trusted-resources-verification-no-match-policy" to skip verification // when no matching policies are found IgnoreNoMatchPolicy = "ignore" + // CoscheduleWorkspaces is the value used for "coschedule" to coschedule PipelineRun Pods sharing the same PVC workspaces to the same node + CoscheduleWorkspaces = "workspaces" + // CoschedulePipelineRuns is the value used for "coschedule" to coschedule all PipelineRun Pods to the same node + CoschedulePipelineRuns = "pipelineruns" + // CoscheduleIsolatePipelineRun is the value used for "coschedule" to coschedule all PipelineRun Pods to the same node, and only allows one PipelineRun to run on a node at a time + CoscheduleIsolatePipelineRun = "isolate-pipelinerun" + // CoscheduleDisabled is the value used for "coschedule" to disabled PipelineRun Pods coschedule + CoscheduleDisabled = "disabled" // ResultExtractionMethodTerminationMessage is the value used for "results-from" as a way to extract results from tasks using kubernetes termination message. ResultExtractionMethodTerminationMessage = "termination-message" // ResultExtractionMethodSidecarLogs is the value used for "results-from" as a way to extract results from tasks using sidecar logs. @@ -59,7 +67,7 @@ const ( // DefaultEnableTektonOciBundles is the default value for "enable-tekton-oci-bundles". DefaultEnableTektonOciBundles = false // DefaultEnableAPIFields is the default value for "enable-api-fields". - DefaultEnableAPIFields = StableAPIFields + DefaultEnableAPIFields = BetaAPIFields // DefaultSendCloudEventsForRuns is the default value for "send-cloudevents-for-runs". 
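The new events.go above is driven by a config-events ConfigMap with "formats" and "sink" keys. A small sketch of parsing it through NewEventsFromMap, using the vendored package path github.com/tektoncd/pipeline/pkg/apis/config; the sink URL is a made-up example:

```go
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/config"
)

func main() {
	events, err := config.NewEventsFromMap(map[string]string{
		"formats": "tektonv1",                       // currently the only valid format
		"sink":    "http://events-sink.default.svc", // hypothetical sink URL
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(events.Formats.String(), events.Sink) // tektonv1 http://events-sink.default.svc

	// Duplicate or unknown formats are rejected.
	_, err = config.NewEventsFromMap(map[string]string{"formats": "tektonv1,tektonv1"})
	fmt.Println(err) // duplicate format: tektonv1
}
```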
DefaultSendCloudEventsForRuns = false // EnforceNonfalsifiabilityWithSpire is the value used for "enable-nonfalsifiability" when SPIRE is used to enable non-falsifiability. @@ -71,11 +79,15 @@ const ( // DefaultNoMatchPolicyConfig is the default value for "trusted-resources-verification-no-match-policy". DefaultNoMatchPolicyConfig = IgnoreNoMatchPolicy // DefaultEnableProvenanceInStatus is the default value for "enable-provenance-status". - DefaultEnableProvenanceInStatus = false + DefaultEnableProvenanceInStatus = true // DefaultResultExtractionMethod is the default value for ResultExtractionMethod DefaultResultExtractionMethod = ResultExtractionMethodTerminationMessage // DefaultMaxResultSize is the default value in bytes for the size of a result DefaultMaxResultSize = 4096 + // DefaultSetSecurityContext is the default value for "set-security-context" + DefaultSetSecurityContext = false + // DefaultCoschedule is the default value for coschedule + DefaultCoschedule = CoscheduleWorkspaces disableAffinityAssistantKey = "disable-affinity-assistant" disableCredsInitKey = "disable-creds-init" @@ -90,8 +102,13 @@ const ( enableProvenanceInStatus = "enable-provenance-in-status" resultExtractionMethod = "results-from" maxResultSize = "max-result-size" + setSecurityContextKey = "set-security-context" + coscheduleKey = "coschedule" ) +// DefaultFeatureFlags holds all the default configurations for the feature flags configmap. +var DefaultFeatureFlags, _ = NewFeatureFlagsFromMap(map[string]string{}) + // FeatureFlags holds the features configurations // +k8s:deepcopy-gen=true // @@ -116,6 +133,8 @@ type FeatureFlags struct { EnableProvenanceInStatus bool ResultExtractionMethod string MaxResultSize int + SetSecurityContext bool + Coschedule string } // GetFeatureFlagsConfigName returns the name of the configmap containing all @@ -179,7 +198,13 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setEnforceNonFalsifiability(cfgMap, tc.EnableAPIFields, &tc.EnforceNonfalsifiability); err != nil { return nil, err } + if err := setFeature(setSecurityContextKey, DefaultSetSecurityContext, &tc.SetSecurityContext); err != nil { + return nil, err + } + if err := setCoschedule(cfgMap, DefaultCoschedule, tc.DisableAffinityAssistant, &tc.Coschedule); err != nil { + return nil, err + } // Given that they are alpha features, Tekton Bundles and Custom Tasks should be switched on if // enable-api-fields is "alpha". If enable-api-fields is not "alpha" then fall back to the value of // each feature's individual flag. @@ -212,6 +237,29 @@ func setEnabledAPIFields(cfgMap map[string]string, defaultValue string, feature return nil } +// setCoschedule sets the "coschedule" flag based on the content of a given map. +// If the feature gate is invalid or incompatible with `disable-affinity-assistant`, then an error is returned. 
+func setCoschedule(cfgMap map[string]string, defaultValue string, disabledAffinityAssistant bool, feature *string) error { + value := defaultValue + if cfg, ok := cfgMap[coscheduleKey]; ok { + value = strings.ToLower(cfg) + } + + switch value { + case CoscheduleDisabled, CoscheduleWorkspaces, CoschedulePipelineRuns, CoscheduleIsolatePipelineRun: + // validate that "coschedule" is compatible with "disable-affinity-assistant" + // "coschedule" must be set to "workspaces" when "disable-affinity-assistant" is false + if !disabledAffinityAssistant && value != CoscheduleWorkspaces { + return fmt.Errorf("coschedule value %v is incompatible with %v setting to false", value, disableAffinityAssistantKey) + } + *feature = value + default: + return fmt.Errorf("invalid value for feature flag %q: %q", coscheduleKey, value) + } + + return nil +} + // setEnforceNonFalsifiability sets the "enforce-nonfalsifiability" flag based on the content of a given map. // If the feature gate is invalid, then an error is returned. func setEnforceNonFalsifiability(cfgMap map[string]string, enableAPIFields string, feature *string) error { @@ -223,22 +271,11 @@ func setEnforceNonFalsifiability(cfgMap map[string]string, enableAPIFields strin // validate that "enforce-nonfalsifiability" is set to a valid value switch value { case EnforceNonfalsifiabilityNone, EnforceNonfalsifiabilityWithSpire: - break - default: - return fmt.Errorf("invalid value for feature flag %q: %q", enforceNonfalsifiability, value) - } - - // validate that "enforce-nonfalsifiability" is set to allowed values for stability level - switch enableAPIFields { - case AlphaAPIFields: *feature = value + return nil default: - // Do not consider any form of non-falsifiability enforcement in non-alpha mode - if value != DefaultEnforceNonfalsifiability { - return fmt.Errorf("%q can be set to non-default values (%q) only in alpha", enforceNonfalsifiability, value) - } + return fmt.Errorf("invalid value for feature flag %q: %q", enforceNonfalsifiability, value) } - return nil } // setResultExtractionMethod sets the "results-from" flag based on the content of a given map. 
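setCoschedule above ties the new "coschedule" flag to "disable-affinity-assistant": any value other than "workspaces" requires the affinity assistant to be disabled. A sketch of an accepted and a rejected combination:

```go
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/config"
)

func main() {
	// Valid: the affinity assistant is disabled, so PipelineRun-level coscheduling is allowed.
	ff, err := config.NewFeatureFlagsFromMap(map[string]string{
		"disable-affinity-assistant": "true",
		"coschedule":                 "pipelineruns",
	})
	fmt.Println(ff.Coschedule, err) // pipelineruns <nil>

	// Invalid: with the affinity assistant still enabled (the default), only "workspaces" is accepted.
	_, err = config.NewFeatureFlagsFromMap(map[string]string{
		"coschedule": "isolate-pipelinerun",
	})
	fmt.Println(err) // coschedule value isolate-pipelinerun is incompatible with disable-affinity-assistant setting to false
}
```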
@@ -297,46 +334,12 @@ func NewFeatureFlagsFromConfigMap(config *corev1.ConfigMap) (*FeatureFlags, erro return NewFeatureFlagsFromMap(config.Data) } -// EnableAlphaAPIFields enables alpha features in an existing context (for use in testing) -func EnableAlphaAPIFields(ctx context.Context) context.Context { - return setEnableAPIFields(ctx, AlphaAPIFields) -} - -// EnableBetaAPIFields enables beta features in an existing context (for use in testing) -func EnableBetaAPIFields(ctx context.Context) context.Context { - return setEnableAPIFields(ctx, BetaAPIFields) -} - -// EnableStableAPIFields enables stable features in an existing context (for use in testing) -func EnableStableAPIFields(ctx context.Context) context.Context { - return setEnableAPIFields(ctx, StableAPIFields) -} - // GetVerificationNoMatchPolicy returns the "trusted-resources-verification-no-match-policy" value func GetVerificationNoMatchPolicy(ctx context.Context) string { return FromContextOrDefaults(ctx).FeatureFlags.VerificationNoMatchPolicy } -// CheckAlphaOrBetaAPIFields return true if the enable-api-fields is either set to alpha or set to beta -func CheckAlphaOrBetaAPIFields(ctx context.Context) bool { - cfg := FromContextOrDefaults(ctx) - return cfg.FeatureFlags.EnableAPIFields == AlphaAPIFields || cfg.FeatureFlags.EnableAPIFields == BetaAPIFields -} - // IsSpireEnabled checks if non-falsifiable provenance is enforced through SPIRE func IsSpireEnabled(ctx context.Context) bool { return FromContextOrDefaults(ctx).FeatureFlags.EnforceNonfalsifiability == EnforceNonfalsifiabilityWithSpire } - -func setEnableAPIFields(ctx context.Context, want string) context.Context { - featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{ - "enable-api-fields": want, - }) - cfg := &Config{ - Defaults: &Defaults{ - DefaultTimeoutMinutes: 60, - }, - FeatureFlags: featureFlags, - } - return ToContext(ctx, cfg) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go index 5b369909ad..9422ec5eb4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go @@ -82,6 +82,9 @@ const ( DurationPipelinerunTypeLastValue = "lastvalue" ) +// DefaultMetrics holds all the default configurations for the metrics. +var DefaultMetrics, _ = newMetricsFromMap(map[string]string{}) + // Metrics holds the configurations for the metrics // +k8s:deepcopy-gen=true type Metrics struct { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/spire_config.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/spire_config.go index 7ad507f202..4a293f042d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/spire_config.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/spire_config.go @@ -47,6 +47,9 @@ const ( SpireNodeAliasPrefixDefault = "/tekton-node/" ) +// DefaultSpire hols all the default configurations for the spire. 
+var DefaultSpire, _ = NewSpireConfigFromMap(map[string]string{}) + // NewSpireConfigFromMap creates a Config from the supplied map func NewSpireConfigFromMap(data map[string]string) (*sc.SpireConfig, error) { cfg := &sc.SpireConfig{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go index 9cb15bdf0b..7630719693 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go @@ -32,6 +32,7 @@ type Config struct { FeatureFlags *FeatureFlags Metrics *Metrics SpireConfig *sc.SpireConfig + Events *Events } // FromContext extracts a Config from the provided context. @@ -49,16 +50,13 @@ func FromContextOrDefaults(ctx context.Context) *Config { if cfg := FromContext(ctx); cfg != nil { return cfg } - defaults, _ := NewDefaultsFromMap(map[string]string{}) - featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{}) - metrics, _ := newMetricsFromMap(map[string]string{}) - spireconfig, _ := NewSpireConfigFromMap(map[string]string{}) return &Config{ - Defaults: defaults, - FeatureFlags: featureFlags, - Metrics: metrics, - SpireConfig: spireconfig, + Defaults: DefaultConfig.DeepCopy(), + FeatureFlags: DefaultFeatureFlags.DeepCopy(), + Metrics: DefaultMetrics.DeepCopy(), + SpireConfig: DefaultSpire.DeepCopy(), + Events: DefaultEvents.DeepCopy(), } } @@ -85,6 +83,7 @@ func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value i GetFeatureFlagsConfigName(): NewFeatureFlagsFromConfigMap, GetMetricsConfigName(): NewMetricsFromConfigMap, GetSpireConfigName(): NewSpireConfigFromConfigMap, + GetEventsConfigName(): NewEventsFromConfigMap, }, onAfterStore..., ), @@ -102,20 +101,23 @@ func (s *Store) ToContext(ctx context.Context) context.Context { func (s *Store) Load() *Config { defaults := s.UntypedLoad(GetDefaultsConfigName()) if defaults == nil { - defaults, _ = NewDefaultsFromMap(map[string]string{}) + defaults = DefaultConfig.DeepCopy() } featureFlags := s.UntypedLoad(GetFeatureFlagsConfigName()) if featureFlags == nil { - featureFlags, _ = NewFeatureFlagsFromMap(map[string]string{}) + featureFlags = DefaultFeatureFlags.DeepCopy() } metrics := s.UntypedLoad(GetMetricsConfigName()) if metrics == nil { - metrics, _ = newMetricsFromMap(map[string]string{}) + metrics = DefaultMetrics.DeepCopy() } - spireconfig := s.UntypedLoad(GetSpireConfigName()) if spireconfig == nil { - spireconfig, _ = NewSpireConfigFromMap(map[string]string{}) + spireconfig = DefaultSpire.DeepCopy() + } + events := s.UntypedLoad(GetEventsConfigName()) + if events == nil { + events = DefaultEvents.DeepCopy() } return &Config{ @@ -123,5 +125,6 @@ func (s *Store) Load() *Config { FeatureFlags: featureFlags.(*FeatureFlags).DeepCopy(), Metrics: metrics.(*Metrics).DeepCopy(), SpireConfig: spireconfig.(*sc.SpireConfig).DeepCopy(), + Events: events.(*Events).DeepCopy(), } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go index 15d1070d8e..0d0bff08fa 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go @@ -56,6 +56,29 @@ func (in *Defaults) DeepCopy() *Defaults { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Events) DeepCopyInto(out *Events) { + *out = *in + if in.Formats != nil { + in, out := &in.Formats, &out.Formats + *out = make(EventFormats, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Events. +func (in *Events) DeepCopy() *Events { + if in == nil { + return nil + } + out := new(Events) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureFlags) DeepCopyInto(out *FeatureFlags) { *out = *in diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/storagetypes.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/constant.go similarity index 67% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/storagetypes.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/constant.go index edccac25bf..19604205b8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/storagetypes.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/constant.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2023 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,9 +17,6 @@ limitations under the License. package pipeline const ( - // ArtifactStorageBucketType holds the name of the PipelineResource type for a bucket - ArtifactStorageBucketType = "bucket" - - // ArtifactStoragePVCType holds the name of the PipelineResource type for a pvc - ArtifactStoragePVCType = "pvc" + // TektonReservedAnnotationExpr is the expression we use to filter out reserved key in annotation + TektonReservedAnnotationExpr = "(chains.tekton.dev)/.*" ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go index 67fb8a6b81..f51a0ac4af 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/matrix_types.go @@ -300,29 +300,17 @@ func (m *Matrix) validateCombinationsCount(ctx context.Context) (errs *apis.Fiel return errs } -// validateParams validates the type of Parameter for Matrix.Params and Matrix.Include.Params -// Matrix.Params must be of type array. Matrix.Include.Params must be of type string. 
-// validateParams also validates Matrix.Params for a unique list of params +// validateUniqueParams validates Matrix.Params for a unique list of params // and a unique list of params in each Matrix.Include.Params specification -func (m *Matrix) validateParams() (errs *apis.FieldError) { +func (m *Matrix) validateUniqueParams() (errs *apis.FieldError) { if m != nil { if m.HasInclude() { for i, include := range m.Include { errs = errs.Also(include.Params.validateDuplicateParameters().ViaField(fmt.Sprintf("matrix.include[%d].params", i))) - for _, param := range include.Params { - if param.Value.Type != ParamTypeString { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type string only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.include.params", param.Name)) - } - } } } if m.HasParams() { errs = errs.Also(m.Params.validateDuplicateParameters().ViaField("matrix.params")) - for _, param := range m.Params { - if param.Value.Type != ParamTypeArray { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type array only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.params", param.Name)) - } - } } } return errs diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go index 5915648f90..abb49a962e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go @@ -1875,7 +1875,7 @@ func schema_pkg_apis_pipeline_v1_PipelineTask(ref common.ReferenceCallback) comm }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Time after which the TaskRun times out. Defaults to 1 hour. Specified TaskRun timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Time after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -3634,7 +3634,7 @@ func schema_pkg_apis_pipeline_v1_TaskRunSpec(ref common.ReferenceCallback) commo }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Time after which one retry attempt times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Time after which one retry attempt times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go index 746cd3d4cb..b47e45f72b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go @@ -20,7 +20,6 @@ import ( "context" "encoding/json" "fmt" - "regexp" "strings" "github.com/tektoncd/pipeline/pkg/substitution" @@ -29,12 +28,6 @@ import ( "knative.dev/pkg/apis" ) -// exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else -// i.e. 
`$(result.resultname)` is a match, but `foo $(result.resultname)` is not. -const exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$` - -var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat) - // ParamsPrefix is the prefix used in $(...) expressions referring to parameters const ParamsPrefix = "params" @@ -110,6 +103,51 @@ func (pp *ParamSpec) setDefaultsForProperties() { } } +// getNames returns all the names of the declared parameters +func (ps ParamSpecs) getNames() []string { + var names []string + for _, p := range ps { + names = append(names, p.Name) + } + return names +} + +// sortByType splits the input params into string params, array params, and object params, in that order +func (ps ParamSpecs) sortByType() (ParamSpecs, ParamSpecs, ParamSpecs) { + var stringParams, arrayParams, objectParams ParamSpecs + for _, p := range ps { + switch p.Type { + case ParamTypeArray: + arrayParams = append(arrayParams, p) + case ParamTypeObject: + objectParams = append(objectParams, p) + case ParamTypeString: + fallthrough + default: + stringParams = append(stringParams, p) + } + } + return stringParams, arrayParams, objectParams +} + +// validateNoDuplicateNames returns an error if any of the params have the same name +func (ps ParamSpecs) validateNoDuplicateNames() *apis.FieldError { + names := ps.getNames() + seen := sets.String{} + dups := sets.String{} + var errs *apis.FieldError + for _, n := range names { + if seen.Has(n) { + dups.Insert(n) + } + seen.Insert(n) + } + for n := range dups { + errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", n)) + } + return errs +} + // Param declares an ParamValues to use for the parameter called name. 
type Param struct { Name string `json:"name"` @@ -150,9 +188,9 @@ func (ps Params) extractParamMapArrVals() map[string][]string { // Params is a list of Param type Params []Param -// extractParamArrayLengths extract and return the lengths of all array params +// ExtractParamArrayLengths extract and return the lengths of all array params // Example of returned value: {"a-array-params": 2,"b-array-params": 2 } -func (ps Params) extractParamArrayLengths() map[string]int { +func (ps Params) ExtractParamArrayLengths() map[string]int { // Collect all array params arrayParamsLengths := make(map[string]int) @@ -178,9 +216,18 @@ func (ps Params) validateDuplicateParameters() (errs *apis.FieldError) { return errs } -// extractParamArrayLengths extract and return the lengths of all array params +// ReplaceVariables applies string, array and object replacements to variables in Params +func (ps Params) ReplaceVariables(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) Params { + params := ps.DeepCopy() + for i := range params { + params[i].Value.ApplyReplacements(stringReplacements, arrayReplacements, objectReplacements) + } + return params +} + +// ExtractDefaultParamArrayLengths extract and return the lengths of all array params // Example of returned value: {"a-array-params": 2,"b-array-params": 2 } -func (ps ParamSpecs) extractParamArrayLengths() map[string]int { +func (ps ParamSpecs) ExtractDefaultParamArrayLengths() map[string]int { // Collect all array params arrayParamsLengths := make(map[string]int) @@ -195,30 +242,6 @@ func (ps ParamSpecs) extractParamArrayLengths() map[string]int { return arrayParamsLengths } -// validateOutofBoundArrayParams validates if the array indexing params are out of bound -// example of arrayIndexingParams: ["$(params.a-array-param[1])", "$(params.b-array-param[2])"] -// example of arrayParamsLengths: {"a-array-params": 2,"b-array-params": 2 } -func validateOutofBoundArrayParams(arrayIndexingParams []string, arrayParamsLengths map[string]int) error { - outofBoundParams := sets.String{} - for _, val := range arrayIndexingParams { - indexString := substitution.ExtractIndexString(val) - idx, _ := substitution.ExtractIndex(indexString) - // this will extract the param name from reference - // e.g. $(params.a-array-param[1]) -> a-array-param - paramName, _, _ := substitution.ExtractVariablesFromString(substitution.TrimArrayIndex(val), "params") - - if paramLength, ok := arrayParamsLengths[paramName[0]]; ok { - if idx >= paramLength { - outofBoundParams.Insert(val) - } - } - } - if outofBoundParams.Len() > 0 { - return fmt.Errorf("non-existent param references:%v", outofBoundParams.List()) - } - return nil -} - // extractArrayIndexingParamRefs takes a string of the form `foo-$(params.array-param[1])-bar` and extracts the portions of the string that reference an element in an array param. // For example, for the string “foo-$(params.array-param[1])-bar-$(params.other-array-param[2])-$(params.string-param)`, // it would return ["$(params.array-param[1])", "$(params.other-array-param[2])"]. 
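The param_types.go hunks above promote the array-length helpers to exported names (ExtractParamArrayLengths, ExtractDefaultParamArrayLengths), add Params.ReplaceVariables, and delete the local validateOutofBoundArrayParams helper; indexing references are instead surfaced through GetIndexingReferencesToArrayParams in the pipeline_validation.go hunk later in this diff, so the bound check itself no longer lives in this file. For orientation, a rough standalone sketch of the removed check follows; a plain regexp stands in for the pkg/substitution helpers, so treat the parsing as an approximation rather than the vendored implementation.

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// arrayIndexRef approximates the substitution helpers used upstream: it captures the
// parameter name and the numeric index from references like "$(params.foo[2])".
var arrayIndexRef = regexp.MustCompile(`\$\(params\.([^\[\)]+)\[(\d+)\]\)`)

// outOfBoundRefs returns every reference whose index is >= the declared array length,
// which is the condition validateOutofBoundArrayParams used to report as an error.
func outOfBoundRefs(refs []string, lengths map[string]int) []string {
	var bad []string
	for _, ref := range refs {
		m := arrayIndexRef.FindStringSubmatch(ref)
		if m == nil {
			continue // not an array-indexing reference
		}
		idx, _ := strconv.Atoi(m[2])
		if length, ok := lengths[m[1]]; ok && idx >= length {
			bad = append(bad, ref)
		}
	}
	return bad
}

func main() {
	lengths := map[string]int{"a-array-param": 2} // e.g. collected via ExtractParamArrayLengths
	refs := []string{"$(params.a-array-param[1])", "$(params.a-array-param[2])"}
	fmt.Println(outOfBoundRefs(refs, lengths)) // [$(params.a-array-param[2])]
}
```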
@@ -443,7 +466,7 @@ func (paramValues ParamValue) MarshalJSON() ([]byte, error) { func (paramValues *ParamValue) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { switch paramValues.Type { case ParamTypeArray: - var newArrayVal []string + newArrayVal := []string{} for _, v := range paramValues.ArrayVal { newArrayVal = append(newArrayVal, substitution.ApplyArrayReplacements(v, stringReplacements, arrayReplacements)...) } @@ -475,7 +498,7 @@ func (paramValues *ParamValue) applyOrCorrect(stringReplacements map[string]stri // trim the head "$(" and the tail ")" or "[*])" // i.e. get "params.name" from "$(params.name)" or "$(params.name[*])" - trimedStringVal := StripStarVarSubExpression(stringVal) + trimedStringVal := substitution.StripStarVarSubExpression(stringVal) // if the stringVal is a reference to a string param if _, ok := stringReplacements[trimedStringVal]; ok { @@ -497,11 +520,6 @@ func (paramValues *ParamValue) applyOrCorrect(stringReplacements map[string]stri } } -// StripStarVarSubExpression strips "$(target[*])"" to get "target" -func StripStarVarSubExpression(s string) string { - return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(s, "$("), ")"), "[*]") -} - // NewStructuredValues creates an ParamValues of type ParamTypeString or ParamTypeArray, based on // how many inputs are given (>1 input will create an array, not string). func NewStructuredValues(value string, values ...string) *ParamValue { @@ -573,23 +591,23 @@ func validateParamStringValue(param Param, prefix string, paramNames sets.String // validateStringVariable validates the normal string fields that can only accept references to string param or individual keys of object param func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { - errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars) errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) - return errs.Also(substitution.ValidateVariableProhibitedP(value, prefix, arrayVars)) + return errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(value, prefix, arrayVars)) } func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { - errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars) errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) - return errs.Also(substitution.ValidateVariableIsolatedP(value, prefix, arrayVars)) + return errs.Also(substitution.ValidateVariableReferenceIsIsolated(value, prefix, arrayVars)) } func validateObjectVariable(value, prefix string, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { objectNames := sets.NewString() for objectParamName, keys := range objectParamNameKeys { objectNames.Insert(objectParamName) - errs = errs.Also(substitution.ValidateVariableP(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) } - return errs.Also(substitution.ValidateEntireVariableProhibitedP(value, prefix, 
objectNames)) + return errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(value, prefix, objectNames)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go index d338944881..e0ce5ec677 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go @@ -203,7 +203,6 @@ type PipelineTask struct { Workspaces []WorkspacePipelineTaskBinding `json:"workspaces,omitempty"` // Time after which the TaskRun times out. Defaults to 1 hour. - // Specified TaskRun timeout should be less than 24h. // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go index 0c60fae33b..e1a7464c35 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go @@ -46,8 +46,18 @@ func (p *Pipeline) SupportedVerbs() []admissionregistrationv1.OperationType { // that any references resources exist, that is done at run time. func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { errs := validate.ObjectMetadata(p.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // When a Pipeline is created directly, instead of declared inline in a PipelineRun, + // we do not support propagated parameters and workspaces. + // Validate that all params and workspaces it uses are declared. + errs = errs.Also(p.Spec.validatePipelineParameterUsage(ctx).ViaField("spec")) + errs = errs.Also(p.Spec.validatePipelineWorkspacesUsage().ViaField("spec")) + // Validate beta fields when a Pipeline is defined, but not as part of validating a Pipeline spec. + // This prevents validation from failing when a Pipeline is converted to a different API version. + // See https://github.com/tektoncd/pipeline/issues/6616 for more information. + // TODO(#6592): Decouple API versioning from feature versioning + errs = errs.Also(p.Spec.ValidateBetaFields(ctx)) + return errs } // Validate checks that taskNames in the Pipeline are valid and that the graph @@ -68,8 +78,6 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally)) // Validate the pipeline's workspaces. 
errs = errs.Also(validatePipelineWorkspacesDeclarations(ps.Workspaces)) - errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Tasks).ViaField("tasks")) - errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Finally).ViaField("finally")) // Validate the pipeline's results errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks, ps.Finally)) errs = errs.Also(validateTasksAndFinallySection(ps)) @@ -81,6 +89,60 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { return errs } +// ValidateBetaFields returns an error if the Pipeline spec uses beta features but does not +// have "enable-api-fields" set to "alpha" or "beta". +func (ps *PipelineSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + // Object parameters + for i, p := range ps.Params { + if p.Type == ParamTypeObject { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields).ViaFieldIndex("params", i)) + } + } + // Indexing into array parameters + arrayParamIndexingRefs := ps.GetIndexingReferencesToArrayParams() + if len(arrayParamIndexingRefs) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "indexing into array parameters", config.BetaAPIFields)) + } + // array and object results + for i, result := range ps.Results { + switch result.Type { + case ResultsTypeObject: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object results", config.BetaAPIFields).ViaFieldIndex("results", i)) + case ResultsTypeArray: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "array results", config.BetaAPIFields).ViaFieldIndex("results", i)) + case ResultsTypeString: + default: + } + } + for i, pt := range ps.Tasks { + errs = errs.Also(pt.validateBetaFields(ctx).ViaFieldIndex("tasks", i)) + } + for i, pt := range ps.Finally { + errs = errs.Also(pt.validateBetaFields(ctx).ViaFieldIndex("tasks", i)) + } + + return errs +} + +// validateBetaFields returns an error if the PipelineTask uses beta features but does not +// have "enable-api-fields" set to "alpha" or "beta". +func (pt *PipelineTask) validateBetaFields(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + if pt.TaskRef != nil { + // Resolvers + if pt.TaskRef.Resolver != "" { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "taskref.resolver", config.BetaAPIFields)) + } + if len(pt.TaskRef.Params) > 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "taskref.params", config.BetaAPIFields)) + } + } else if pt.TaskSpec != nil { + errs = errs.Also(pt.TaskSpec.ValidateBetaFields(ctx)) + } + return errs +} + // ValidatePipelineTasks ensures that pipeline tasks has unique label, pipeline tasks has specified one of // taskRef or taskSpec, and in case of a pipeline task with taskRef, it has a reference to a valid task (task name) func ValidatePipelineTasks(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) *apis.FieldError { @@ -107,6 +169,16 @@ func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, p return errs } +// validateUsageOfDeclaredPipelineTaskParameters validates that all parameters referenced in the pipeline Task are declared by the pipeline Task. 
+func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, path string) (errs *apis.FieldError) { + for i, t := range l { + if t.TaskSpec != nil { + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.TaskSpec.Steps, t.TaskSpec.Params).ViaFieldIndex(path, i)) + } + } + return errs +} + // ValidateName checks whether the PipelineTask's name is a valid DNS label func (pt PipelineTask) ValidateName() *apis.FieldError { if err := validation.IsDNS1123Label(pt.Name); len(err) > 0 { @@ -126,11 +198,21 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(pt.validateRefOrSpec()) errs = errs.Also(pt.validateEmbeddedOrType()) - + // taskKinds contains the kinds when the apiVersion is not set, they are not custom tasks, + // if apiVersion is set they are custom tasks. + taskKinds := map[TaskKind]bool{ + "": true, + NamespacedTaskKind: true, + ClusterTaskRefKind: true, + } // Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task switch { + case pt.TaskRef != nil && !taskKinds[pt.TaskRef.Kind]: + errs = errs.Also(pt.validateCustomTask()) case pt.TaskRef != nil && pt.TaskRef.APIVersion != "": errs = errs.Also(pt.validateCustomTask()) + case pt.TaskSpec != nil && !taskKinds[TaskKind(pt.TaskSpec.Kind)]: + errs = errs.Also(pt.validateCustomTask()) case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "": errs = errs.Also(pt.validateCustomTask()) default: @@ -145,9 +227,9 @@ func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldErr // when the enable-api-fields feature gate is anything but "alpha". errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) errs = errs.Also(pt.Matrix.validateCombinationsCount(ctx)) + errs = errs.Also(pt.Matrix.validateUniqueParams()) } errs = errs.Also(pt.Matrix.validateParameterInOneOfMatrixOrParams(pt.Params)) - errs = errs.Also(pt.Matrix.validateParams()) return errs } @@ -239,29 +321,12 @@ func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) { // validateTask validates a pipeline task or a final task for taskRef and taskSpec func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) // Validate TaskSpec if it's present if pt.TaskSpec != nil { errs = errs.Also(pt.TaskSpec.Validate(ctx).ViaField("taskSpec")) } if pt.TaskRef != nil { - if pt.TaskRef.Name != "" { - // TaskRef name must be a valid k8s name - if errSlice := validation.IsQualifiedName(pt.TaskRef.Name); len(errSlice) != 0 { - errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) - } - } else if pt.TaskRef.Resolver == "" { - errs = errs.Also(apis.ErrInvalidValue("taskRef must specify name", "taskRef.name")) - } - if cfg.FeatureFlags.EnableAPIFields != config.BetaAPIFields && cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { - // fail if resolver or resource are present when enable-api-fields is false. 
- if pt.TaskRef.Resolver != "" { - errs = errs.Also(apis.ErrDisallowedFields("taskref.resolver")) - } - if len(pt.TaskRef.Params) > 0 { - errs = errs.Also(apis.ErrDisallowedFields("taskref.params")) - } - } + errs = errs.Also(pt.TaskRef.Validate(ctx).ViaField("taskRef")) } return errs } @@ -285,12 +350,43 @@ func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) return errs } -// validatePipelineWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in -// the pipeline -func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { - if !config.ValidateParameterVariablesAndWorkspaces(ctx) { - return nil +// validatePipelineParameterUsage validates that parameters referenced in the Pipeline are declared by the Pipeline +func (ps *PipelineSpec) validatePipelineParameterUsage(ctx context.Context) (errs *apis.FieldError) { + errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, "tasks")) + errs = errs.Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, "finally")) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.Tasks, ps.Params).ViaField("tasks")) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.Finally, ps.Params).ViaField("finally")) + return errs +} + +// validatePipelineTaskParameterUsage validates that parameters referenced in the Pipeline Tasks are declared by the Pipeline +func validatePipelineTaskParameterUsage(tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) { + allParamNames := sets.NewString(params.getNames()...) + _, arrayParams, objectParams := params.sortByType() + arrayParamNames := sets.NewString(arrayParams.getNames()...) 
+ objectParameterNameKeys := map[string][]string{} + for _, p := range objectParams { + for k := range p.Properties { + objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) + } + } + errs = errs.Also(validatePipelineParametersVariables(tasks, "params", allParamNames, arrayParamNames, objectParameterNameKeys)) + for i, task := range tasks { + errs = errs.Also(task.Params.validateDuplicateParameters().ViaFieldIndex("params", i)) } + return errs +} + +// validatePipelineWorkspacesUsage validates that workspaces referenced in the Pipeline are declared by the Pipeline +func (ps *PipelineSpec) validatePipelineWorkspacesUsage() (errs *apis.FieldError) { + errs = errs.Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) + errs = errs.Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) + return errs +} + +// validatePipelineTasksWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in +// the pipeline +func validatePipelineTasksWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { workspaceNames := sets.NewString() for _, ws := range wss { workspaceNames.Insert(ws.Name) @@ -304,37 +400,16 @@ func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspac // ValidatePipelineParameterVariables validates parameters with those specified by each pipeline task, // (1) it validates the type of parameter is either string or array (2) parameter default value matches -// with the type of that param (3) ensures that the referenced param variable is defined is part of the param declarations -func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { - parameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - objectParameterNameKeys := map[string][]string{} - +// with the type of that param +func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) { // validates all the types within a slice of ParamSpecs errs = errs.Also(ValidateParameterTypes(ctx, params).ViaField("params")) - - for _, p := range params { - if parameterNames.Has(p.Name) { - errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) - } - // Add parameter name to parameterNames, and to arrayParameterNames if type is array. 
- parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - - if p.Type == ParamTypeObject { - for k := range p.Properties { - objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) - } - } - } - if config.ValidateParameterVariablesAndWorkspaces(ctx) { - errs = errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, objectParameterNameKeys)) + errs = errs.Also(params.validateNoDuplicateNames()) + for i, task := range tasks { + errs = errs.Also(task.Params.validateDuplicateParameters().ViaField("params").ViaIndex(i)) } return errs } - func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, task := range tasks { errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) @@ -493,7 +568,7 @@ func validateExecutionStatusVariablesExpressions(expressions []string, ptNames s } func validatePipelineContextVariablesInParamValues(paramValues []string, prefix string, contextNames sets.String) (errs *apis.FieldError) { for _, paramValue := range paramValues { - errs = errs.Also(substitution.ValidateVariableP(paramValue, prefix, contextNames).ViaField("value")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(paramValue, prefix, contextNames).ViaField("value")) } return errs } @@ -675,20 +750,11 @@ func validateResultsFromMatrixedPipelineTasksNotConsumed(tasks []PipelineTask, f return errs } -// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. -// error is returned when the array indexing reference is out of bound of the array param -// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. -func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { - if !config.CheckAlphaOrBetaAPIFields(ctx) { - return nil - } - - // Collect all array params lengths - arrayParamsLengths := ps.Params.extractParamArrayLengths() - for k, v := range params.extractParamArrayLengths() { - arrayParamsLengths[k] = v - } - +// GetIndexingReferencesToArrayParams returns all strings referencing indices of PipelineRun array parameters +// from parameters, workspaces, and when expressions defined in the Pipeline's Tasks and Finally Tasks. +// For example, if a Task in the Pipeline has a parameter with a value "$(params.array-param-name[1])", +// this would be one of the strings returned. +func (ps *PipelineSpec) GetIndexingReferencesToArrayParams() sets.String { paramsRefs := []string{} for i := range ps.Tasks { paramsRefs = append(paramsRefs, ps.Tasks[i].Params.extractValues()...) @@ -703,22 +769,20 @@ func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Para paramsRefs = append(paramsRefs, wes.Values...) } } - for i := range ps.Finally { paramsRefs = append(paramsRefs, ps.Finally[i].Params.extractValues()...) if ps.Finally[i].IsMatrixed() { paramsRefs = append(paramsRefs, ps.Finally[i].Matrix.Params.extractValues()...) } for _, wes := range ps.Finally[i].When { + paramsRefs = append(paramsRefs, wes.Input) paramsRefs = append(paramsRefs, wes.Values...) 
} } - // extract all array indexing references, for example []{"$(params.array-params[1])"} arrayIndexParamRefs := []string{} for _, p := range paramsRefs { arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) } - - return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) + return sets.NewString(arrayIndexParamRefs...) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go index e53efe5e5a..19f00f700b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go @@ -18,19 +18,32 @@ package v1 import ( "context" + "regexp" "time" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" + "knative.dev/pkg/kmap" ) -var _ apis.Defaultable = (*PipelineRun)(nil) +var ( + _ apis.Defaultable = (*PipelineRun)(nil) + filterReservedAnnotationRegexp = regexp.MustCompile(pipeline.TektonReservedAnnotationExpr) +) // SetDefaults implements apis.Defaultable func (pr *PipelineRun) SetDefaults(ctx context.Context) { pr.Spec.SetDefaults(ctx) + + // Silently filtering out Tekton Reserved annotations at creation + if apis.IsInCreate(ctx) { + pr.ObjectMeta.Annotations = kmap.Filter(pr.ObjectMeta.Annotations, func(s string) bool { + return filterReservedAnnotationRegexp.MatchString(s) + }) + } } // SetDefaults implements apis.Defaultable diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go index cc8ad99264..88120472ea 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go @@ -18,6 +18,7 @@ package v1 import ( "context" + "fmt" "time" "github.com/tektoncd/pipeline/pkg/apis/config" @@ -153,6 +154,22 @@ func (pr *PipelineRun) GetNamespacedName() types.NamespacedName { return types.NamespacedName{Namespace: pr.Namespace, Name: pr.Name} } +// IsTimeoutConditionSet returns true when the pipelinerun has the pipelinerun timed out reason +func (pr *PipelineRun) IsTimeoutConditionSet() bool { + condition := pr.Status.GetCondition(apis.ConditionSucceeded) + return condition.IsFalse() && condition.Reason == PipelineRunReasonTimedOut.String() +} + +// SetTimeoutCondition sets the status of the PipelineRun to timed out. 
+func (pr *PipelineRun) SetTimeoutCondition(ctx context.Context) { + pr.Status.SetCondition(&apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: PipelineRunReasonTimedOut.String(), + Message: fmt.Sprintf("PipelineRun %q failed to finish within %q", pr.Name, pr.PipelineTimeout(ctx).String()), + }) +} + // HasTimedOut returns true if a pipelinerun has exceeded its spec.Timeout based on its status.Timeout func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool { timeout := pr.PipelineTimeout(ctx) @@ -170,6 +187,19 @@ func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bo return false } +// HasTimedOutForALongTime returns true if a pipelinerun has exceeed its spec.Timeout based its status.StartTime +// by a large margin +func (pr *PipelineRun) HasTimedOutForALongTime(ctx context.Context, c clock.PassiveClock) bool { + if !pr.HasTimedOut(ctx, c) { + return false + } + timeout := pr.PipelineTimeout(ctx) + startTime := pr.Status.StartTime + runtime := c.Since(startTime.Time) + // We are arbitrarily defining large margin as doubling the spec.timeout + return runtime >= 2*timeout +} + // HaveTasksTimedOut returns true if a pipelinerun has exceeded its spec.Timeouts.Tasks func (pr *PipelineRun) HaveTasksTimedOut(ctx context.Context, c clock.PassiveClock) bool { timeout := pr.TasksTimeout() diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go index d5e8361f8c..a072eef22f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go @@ -66,8 +66,12 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) // Validate PipelineSpec if it's present if ps.PipelineSpec != nil { - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) errs = errs.Also(ps.PipelineSpec.Validate(ctx).ViaField("pipelineSpec")) + // Validate beta fields separately for inline Pipeline definitions. + // This prevents validation from failing in the reconciler when a Pipeline is converted to a different API version. + // See https://github.com/tektoncd/pipeline/issues/6616 for more information. 
+ // TODO(#6592): Decouple API versioning from feature versioning + errs = errs.Also(ps.PipelineSpec.ValidateBetaFields(ctx).ViaField("pipelineSpec")) } // Validate PipelineRun parameters @@ -148,66 +152,62 @@ func (ps *PipelineRunSpec) validateInlineParameters(ctx context.Context) (errs * if ps.PipelineSpec == nil { return errs } - var paramSpec []ParamSpec + paramSpecForValidation := make(map[string]ParamSpec) for _, p := range ps.Params { - pSpec := ParamSpec{ - Name: p.Name, - Default: &p.Value, + paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation) + } + for _, p := range ps.PipelineSpec.Params { + var err *apis.FieldError + paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation) + if err != nil { + errs = errs.Also(err) } - paramSpec = append(paramSpec, pSpec) } - paramSpec = appendParamSpec(paramSpec, ps.PipelineSpec.Params) for _, pt := range ps.PipelineSpec.Tasks { - paramSpec = appendParam(paramSpec, pt.Params) + paramSpecForValidation = appendPipelineTaskParams(paramSpecForValidation, pt.Params) if pt.TaskSpec != nil && pt.TaskSpec.Params != nil { - paramSpec = appendParamSpec(paramSpec, pt.TaskSpec.Params) + for _, p := range pt.TaskSpec.Params { + var err *apis.FieldError + paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation) + if err != nil { + errs = errs.Also(err) + } + } } } + var paramSpec []ParamSpec + for _, v := range paramSpecForValidation { + paramSpec = append(paramSpec, v) + } if ps.PipelineSpec != nil && ps.PipelineSpec.Tasks != nil { for _, pt := range ps.PipelineSpec.Tasks { if pt.TaskSpec != nil && pt.TaskSpec.Steps != nil { - errs = errs.Also(ValidateParameterVariables( - config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), pt.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) + errs = errs.Also(ValidateParameterVariables(ctx, pt.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, pt.TaskSpec.Steps, paramSpec)) } } + errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.PipelineSpec.Tasks, paramSpec)) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.PipelineSpec.Tasks, paramSpec)) } return errs } -func appendParamSpec(paramSpec []ParamSpec, params []ParamSpec) []ParamSpec { +func appendPipelineTaskParams(paramSpecForValidation map[string]ParamSpec, params Params) map[string]ParamSpec { for _, p := range params { - skip := false - for _, ps := range paramSpec { - if ps.Name == p.Name { - skip = true - break - } - } - if !skip { - paramSpec = append(paramSpec, p) - } - } - return paramSpec -} - -func appendParam(paramSpec []ParamSpec, params Params) []ParamSpec { - for _, p := range params { - skip := false - for _, ps := range paramSpec { - if ps.Name == p.Name { - skip = true - break - } - } - if !skip { - pSpec := ParamSpec{ - Name: p.Name, - Default: &p.Value, + if pSpec, ok := paramSpecForValidation[p.Name]; ok { + if p.Value.ObjectVal != nil { + for k, v := range p.Value.ObjectVal { + pSpec.Default.ObjectVal[k] = v + pSpec.Properties[k] = PropertySpec{Type: ParamTypeString} + } } - paramSpec = append(paramSpec, pSpec) + paramSpecForValidation[p.Name] = pSpec + } else { + paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation) } } - return paramSpec + return paramSpecForValidation } func validateSpecStatus(status PipelineRunSpecStatus) *apis.FieldError { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go index 1fd9ddd6b1..0d19c2ab0a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go @@ -16,18 +16,10 @@ package v1 import ( "context" "fmt" - "regexp" - "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/version" "knative.dev/pkg/apis" ) -// ResultNameFormat Constant used to define the regex Result.Name should follow -const ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$` - -var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat) - // Validate implements apis.Validatable func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { if !resultNameFormatRegex.MatchString(tr.Name) { @@ -35,15 +27,11 @@ func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { } switch { - // Object results is beta feature - check if the feature flag is set to "beta" or "alpha" case tr.Type == ResultsTypeObject: errs := validateObjectResult(tr) - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) return errs - // Array results is a beta feature - check if the feature flag is set to "beta" or "alpha" case tr.Type == ResultsTypeArray: - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) - return errs + return nil // Resources created before the result. Type was introduced may not have Type set // and should be considered valid case tr.Type == "": diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go index 1dcd06e6e2..5a7610e032 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go @@ -39,19 +39,26 @@ const ( objectResultExpressionFormat = "tasks..results.." // ResultTaskPart Constant used to define the "tasks" part of a pipeline result reference ResultTaskPart = "tasks" - // ResultFinallyPart Constant used to define the "finally" part of a task result reference + // ResultFinallyPart Constant used to define the "finally" part of a pipeline result reference ResultFinallyPart = "finally" // ResultResultPart Constant used to define the "results" part of a pipeline result reference ResultResultPart = "results" // TODO(#2462) use one regex across all substitutions // variableSubstitutionFormat matches format like $result.resultname, $result.resultname[int] and $result.resultname[*] variableSubstitutionFormat = `\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)` + // exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else + // i.e. `$(result.resultname)` is a match, but `foo $(result.resultname)` is not. 
+ exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$` // arrayIndexing will match all `[int]` and `[*]` for parseExpression arrayIndexing = `\[([0-9])*\*?\]` + // ResultNameFormat Constant used to define the regex Result.Name should follow + ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$` ) // VariableSubstitutionRegex is a regex to find all result matching substitutions var VariableSubstitutionRegex = regexp.MustCompile(variableSubstitutionFormat) +var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat) +var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat) // arrayIndexingRegex is used to match `[int]` and `[*]` var arrayIndexingRegex = regexp.MustCompile(arrayIndexing) @@ -180,7 +187,7 @@ func parseExpression(substitutionExpression string) (string, string, int, string return subExpressions[1], subExpressions[3], 0, subExpressions[4], nil } } - return "", "", 0, "", fmt.Errorf("Must be one of the form 1). %q; 2). %q", resultExpressionFormat, objectResultExpressionFormat) + return "", "", 0, "", fmt.Errorf("must be one of the form 1). %q; 2). %q", resultExpressionFormat, objectResultExpressionFormat) } // ParseResultName parse the input string to extract resultName and result index. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json index f7b05c14f2..5a71f5f50c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json @@ -917,7 +917,7 @@ "$ref": "#/definitions/v1.EmbeddedTask" }, "timeout": { - "description": "Time after which the TaskRun times out. Defaults to 1 hour. Specified TaskRun timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Time after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", "$ref": "#/definitions/v1.Duration" }, "when": { @@ -1905,7 +1905,7 @@ "$ref": "#/definitions/v1.TaskSpec" }, "timeout": { - "description": "Time after which one retry attempt times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Time after which one retry attempt times out. Defaults to 1 hour. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", "$ref": "#/definitions/v1.Duration" }, "workspaces": { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go index 5848053bf6..8a051618fe 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go @@ -62,8 +62,16 @@ var objectVariableNameFormatRegex = regexp.MustCompile(objectVariableNameFormat) // Validate implements apis.Validatable func (t *Task) Validate(ctx context.Context) *apis.FieldError { errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // When a Task is created directly, instead of declared inline in a TaskRun or PipelineRun, + // we do not support propagated parameters. Validate that all params it uses are declared. + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.Spec.Steps, t.Spec.Params).ViaField("spec")) + // Validate beta fields when a Task is defined, but not as part of validating a Task spec. + // This prevents validation from failing when a Task is converted to a different API version. + // See https://github.com/tektoncd/pipeline/issues/6616 for more information. + // TODO(#6592): Decouple API versioning from feature versioning + errs = errs.Also(t.Spec.ValidateBetaFields(ctx)) + return errs } // Validate implements apis.Validatable @@ -72,14 +80,6 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(apis.ErrMissingField("steps")) } - if config.IsSubstituted(ctx) { - // Validate the task's workspaces only. - errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) - errs = errs.Also(validateWorkspaceUsages(ctx, ts)) - - return errs - } - errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes")) errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) errs = errs.Also(validateWorkspaceUsages(ctx, ts)) @@ -102,6 +102,57 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { return errs } +// ValidateBetaFields returns an error if the Task spec uses beta features but does not +// have "enable-api-fields" set to "alpha" or "beta". 
+func (ts *TaskSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + // Object parameters + for i, p := range ts.Params { + if p.Type == ParamTypeObject { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields).ViaFieldIndex("params", i)) + } + } + // Indexing into array parameters + arrayIndexParamRefs := ts.GetIndexingReferencesToArrayParams() + if len(arrayIndexParamRefs) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "indexing into array parameters", config.BetaAPIFields)) + } + // Array and object results + for i, result := range ts.Results { + switch result.Type { + case ResultsTypeObject: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object results", config.BetaAPIFields).ViaFieldIndex("results", i)) + case ResultsTypeArray: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "array results", config.BetaAPIFields).ViaFieldIndex("results", i)) + case ResultsTypeString: + default: + } + } + return errs +} + +// ValidateUsageOfDeclaredParameters validates that all parameters referenced in the Task are declared by the Task. +func ValidateUsageOfDeclaredParameters(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError { + var errs *apis.FieldError + _, _, objectParams := params.sortByType() + allParameterNames := sets.NewString(params.getNames()...) + errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateObjectUsage(ctx, steps, objectParams)) + errs = errs.Also(validateObjectParamsHaveProperties(ctx, params)) + return errs +} + +// validateObjectParamsHaveProperties returns an error if any declared object params are missing properties +func validateObjectParamsHaveProperties(ctx context.Context, params ParamSpecs) *apis.FieldError { + var errs *apis.FieldError + for _, p := range params { + if p.Type == ParamTypeObject && p.Properties == nil { + errs = errs.Also(apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name))) + } + } + return errs +} + func validateSidecarNames(sidecars []Sidecar) (errs *apis.FieldError) { for _, sc := range sidecars { if sc.Name == pipeline.ReservedResultsSidecarName { @@ -157,8 +208,8 @@ func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, // validateWorkspaceUsages checks that all WorkspaceUsage objects in Steps // refer to workspaces that are defined in the Task. // -// This is an alpha feature and will fail validation if it's used by a step -// or sidecar when the enable-api-fields feature gate is anything but "alpha". +// This is a beta feature and will fail validation if it's used by a step +// or sidecar when the enable-api-fields feature gate is anything but "beta". 
func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.FieldError) { workspaces := ts.Workspaces steps := ts.Steps @@ -171,7 +222,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for stepIdx, step := range steps { if len(step.Workspaces) != 0 { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.BetaAPIFields).ViaIndex(stepIdx).ViaField("steps")) } for workspaceIdx, w := range step.Workspaces { if !wsNames.Has(w.Name) { @@ -182,7 +233,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for sidecarIdx, sidecar := range sidecars { if len(sidecar.Workspaces) != 0 { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.BetaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) } for workspaceIdx, w := range sidecar.Workspaces { if !wsNames.Has(w.Name) { @@ -294,11 +345,6 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi // ValidateParameterTypes validates all the types within a slice of ParamSpecs func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { for _, p := range params { - if p.Type == ParamTypeObject { - // Object type parameter is a beta feature and will fail validation if it's used in a task spec - // when the enable-api-fields feature gate is not "alpha" or "beta". - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) - } errs = errs.Also(p.ValidateType(ctx)) } return errs @@ -337,14 +383,6 @@ func (p ParamSpec) ValidateType(ctx context.Context) *apis.FieldError { // definition of `properties` section and the type of a PropertySpec is allowed. // (Currently, only string is allowed) func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { - if p.Type == ParamTypeObject && p.Properties == nil { - // If this we are not skipping validation checks due to propagated params - // then properties field is required. 
- if config.ValidateParameterVariablesAndWorkspaces(ctx) { - return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) - } - } - invalidKeys := []string{} for key, propertySpec := range p.Properties { if propertySpec.Type != ParamTypeString { @@ -363,38 +401,17 @@ func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { } // ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps -func ValidateParameterVariables(ctx context.Context, steps []Step, params []ParamSpec) *apis.FieldError { - allParameterNames := sets.NewString() - stringParameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - objectParamSpecs := []ParamSpec{} +func ValidateParameterVariables(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError { var errs *apis.FieldError - for _, p := range params { - // validate no duplicate names - if allParameterNames.Has(p.Name) { - errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) - } - allParameterNames.Insert(p.Name) - - switch p.Type { - case ParamTypeArray: - arrayParameterNames.Insert(p.Name) - case ParamTypeObject: - objectParamSpecs = append(objectParamSpecs, p) - case ParamTypeString: - fallthrough - default: - stringParameterNames.Insert(p.Name) - } - } - errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) - if config.ValidateParameterVariablesAndWorkspaces(ctx) { - errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) - errs = errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) - } + errs = errs.Also(params.validateNoDuplicateNames()) + stringParams, arrayParams, objectParams := params.sortByType() + stringParameterNames := sets.NewString(stringParams.getNames()...) + arrayParameterNames := sets.NewString(arrayParams.getNames()...) + errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParams)) return errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) } +// validateTaskContextVariables returns an error if any Steps reference context variables that don't exist. func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { taskRunContextNames := sets.NewString().Insert( "name", @@ -416,7 +433,7 @@ func validateTaskResultsVariables(ctx context.Context, steps []Step, results []T resultsNames.Insert(r.Name) } for idx, step := range steps { - errs = errs.Also(validateTaskVariable(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) } return errs } @@ -441,8 +458,7 @@ func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) } -// validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings -// i.e. 
param.objectParam, param.objectParam[*] +// validateObjectUsageAsWhole returns an error if the Steps contain references to the entire input object params in fields where these references are prohibited func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { for idx, step := range steps { errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars)).ViaFieldIndex("steps", idx) @@ -450,59 +466,62 @@ func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) ( return errs } +// validateStepObjectUsageAsWhole returns an error if the Step contains references to the entire input object params in fields where these references are prohibited func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskNoObjectReferenced(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskNoObjectReferenced(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskNoObjectReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskNoObjectReferenced(step.Script, prefix, vars).ViaField("script")) + errs := substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Script, prefix, vars).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskNoObjectReferenced(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(cmd, prefix, vars).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskNoObjectReferenced(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(arg, prefix, vars).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskNoObjectReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskNoObjectReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoObjectReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoObjectReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) } return errs } -func validateArrayUsage(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { +// validateArrayUsage returns an error if the Steps contain 
references to the input array params in fields where these references are prohibited +func validateArrayUsage(steps []Step, prefix string, arrayParamNames sets.String) (errs *apis.FieldError) { for idx, step := range steps { - errs = errs.Also(validateStepArrayUsage(step, prefix, vars)).ViaFieldIndex("steps", idx) + errs = errs.Also(validateStepArrayUsage(step, prefix, arrayParamNames)).ViaFieldIndex("steps", idx) } return errs } -func validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskNoArrayReferenced(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskNoArrayReferenced(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskNoArrayReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskNoArrayReferenced(step.Script, prefix, vars).ViaField("script")) +// validateStepArrayUsage returns an error if the Step contains references to the input array params in fields where these references are prohibited +func validateStepArrayUsage(step Step, prefix string, arrayParamNames sets.String) *apis.FieldError { + errs := substitution.ValidateNoReferencesToProhibitedVariables(step.Name, prefix, arrayParamNames).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Image, prefix, arrayParamNames).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.WorkingDir, prefix, arrayParamNames).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Script, prefix, arrayParamNames).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskArraysIsolated(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(cmd, prefix, arrayParamNames).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskArraysIsolated(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(arg, prefix, arrayParamNames).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskNoArrayReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(env.Value, prefix, arrayParamNames).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskNoArrayReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoArrayReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoArrayReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.Name, prefix, arrayParamNames).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.MountPath, prefix, arrayParamNames).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.SubPath, prefix, arrayParamNames).ViaField("subPath").ViaFieldIndex("volumeMount", i)) } return errs } +// validateVariables returns an error if the Steps contain references to any unknown variables func validateVariables(ctx context.Context, steps []Step, prefix string, vars 
sets.String) (errs *apis.FieldError) { - // We've checked param name format. Now, we want to check if param names are referenced correctly in each step for idx, step := range steps { errs = errs.Also(validateStepVariables(ctx, step, prefix, vars).ViaFieldIndex("steps", idx)) } @@ -558,68 +577,41 @@ func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSp return errs } +// validateStepVariables returns an error if the Step contains references to any unknown variables func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskVariable(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskVariable(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskVariable(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + errs := substitution.ValidateNoReferencesToUnknownVariables(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, prefix, vars).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskVariable(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(cmd, prefix, vars).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskVariable(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(arg, prefix, vars).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskVariable(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskVariable(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskVariable(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskVariable(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) } - errs = errs.Also(validateTaskVariable(string(step.OnError), prefix, vars).ViaField("onError")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(string(step.OnError), prefix, vars).ViaField("onError")) return errs } -func validateTaskVariable(value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariableP(value, prefix, vars) -} - -func validateTaskNoObjectReferenced(value, prefix string, objectNames sets.String) *apis.FieldError { - return substitution.ValidateEntireVariableProhibitedP(value, prefix, 
objectNames) -} - -func validateTaskNoArrayReferenced(value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableProhibitedP(value, prefix, arrayNames) -} - -func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableIsolatedP(value, prefix, arrayNames) -} - // isParamRefs attempts to check if a specified string looks like it contains any parameter reference // This is useful to make sure the specified value looks like a Parameter Reference before performing any strict validation func isParamRefs(s string) bool { return strings.HasPrefix(s, "$("+ParamsPrefix) } -// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. -// error is returned when the array indexing reference is out of bound of the array param -// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. -// - `trParams` are params from taskrun. -// - `taskSpec` contains params declarations. -func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { - cfg := config.FromContextOrDefaults(ctx) - if cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { - return nil - } - - // Collect all array params lengths - arrayParamsLengths := ts.Params.extractParamArrayLengths() - for k, v := range params.extractParamArrayLengths() { - arrayParamsLengths[k] = v - } - +// GetIndexingReferencesToArrayParams returns all strings referencing indices of TaskRun array parameters +// from parameters, workspaces, and when expressions defined in the Task. +// For example, if a Task has a parameter with a value "$(params.array-param-name[1])", +// this would be one of the strings returned. +func (ts *TaskSpec) GetIndexingReferencesToArrayParams() sets.String { // collect all the possible places to use param references paramsRefs := []string{} paramsRefs = append(paramsRefs, extractParamRefsFromSteps(ts.Steps)...) @@ -629,12 +621,10 @@ func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) paramsRefs = append(paramsRefs, v.MountPath) } paramsRefs = append(paramsRefs, extractParamRefsFromSidecars(ts.Sidecars)...) - // extract all array indexing references, for example []{"$(params.array-params[1])"} arrayIndexParamRefs := []string{} for _, p := range paramsRefs { arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) } - - return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) + return sets.NewString(arrayIndexParamRefs...) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go index 2bb395dac2..f7558333cf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_types.go @@ -42,6 +42,9 @@ type TaskKind string const ( // NamespacedTaskKind indicates that the task type has a namespaced scope. NamespacedTaskKind TaskKind = "Task" + // ClusterTaskRefKind is the task type for a reference to a task with cluster scope. + // ClusterTasks are not supported in v1, but v1 types may reference ClusterTasks. 
+ ClusterTaskRefKind TaskKind = "ClusterTask" ) // IsCustomTask checks whether the reference is to a Custom Task diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go index 02dca53018..104af5d045 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go @@ -18,9 +18,11 @@ package v1 import ( "context" + "strings" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/version" + "k8s.io/apimachinery/pkg/util/validation" "knative.dev/pkg/apis" ) @@ -31,7 +33,8 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { return } - if ref.Resolver != "" || ref.Params != nil { + switch { + case ref.Resolver != "" || ref.Params != nil: if ref.Resolver != "" { errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver")) if ref.Name != "" { @@ -48,8 +51,13 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { } errs = errs.Also(ValidateParameters(ctx, ref.Params)) } - } else if ref.Name == "" { + case ref.Name != "": + // TaskRef name must be a valid k8s name + if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 { + errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) + } + default: errs = errs.Also(apis.ErrMissingField("name")) } - return + return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go index 9f34e6b5c7..79fa50f3cd 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go @@ -24,6 +24,7 @@ import ( pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" + "knative.dev/pkg/kmap" ) var _ apis.Defaultable = (*TaskRun)(nil) @@ -36,6 +37,13 @@ func (tr *TaskRun) SetDefaults(ctx context.Context) { ctx = apis.WithinParent(ctx, tr.ObjectMeta) tr.Spec.SetDefaults(ctx) + // Silently filtering out Tekton Reserved annotations at creation + if apis.IsInCreate(ctx) { + tr.ObjectMeta.Annotations = kmap.Filter(tr.ObjectMeta.Annotations, func(s string) bool { + return filterReservedAnnotationRegexp.MatchString(s) + }) + } + // If the TaskRun doesn't have a managed-by label, apply the default // specified in the config. cfg := config.FromContextOrDefaults(ctx) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go index 7375c01ff5..30095d5352 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go @@ -55,7 +55,6 @@ type TaskRunSpec struct { // +optional Retries int `json:"retries,omitempty"` // Time after which one retry attempt times out. Defaults to 1 hour. - // Specified build timeout should be less than 24h. 
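// Editor's illustrative sketch, not part of this diff: with the taskref_validation.go
// change above, v1.TaskRef.Validate now rejects names that are not valid Kubernetes
// qualified names. The package name, function, and example names are hypothetical.
package example

import (
	"context"
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func checkTaskRefNames(ctx context.Context) {
	good := &v1.TaskRef{Name: "build-and-push"}
	bad := &v1.TaskRef{Name: "Not a valid name!"}

	fmt.Println(good.Validate(ctx)) // expected: <nil>
	fmt.Println(bad.Validate(ctx))  // expected: invalid value error on the "name" field
}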
// Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -381,11 +380,16 @@ func (tr *TaskRun) HasStarted() bool { return tr.Status.StartTime != nil && !tr.Status.StartTime.IsZero() } -// IsSuccessful returns true if the TaskRun's status indicates that it is done. +// IsSuccessful returns true if the TaskRun's status indicates that it has succeeded. func (tr *TaskRun) IsSuccessful() bool { return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } +// IsFailure returns true if the TaskRun's status indicates that it has failed. +func (tr *TaskRun) IsFailure() bool { + return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsFalse() +} + // IsCancelled returns true if the TaskRun's spec status is set to Cancelled state func (tr *TaskRun) IsCancelled() bool { return tr.Spec.Status == TaskRunSpecStatusCancelled diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go index 055094bb6a..c818a5760c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go @@ -62,9 +62,12 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { } // Validate TaskSpec if it's present. if ts.TaskSpec != nil { - // skip validation of parameter and workspaces variables since we validate them via taskrunspec below. - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) errs = errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskSpec")) + // Validate beta fields separately for inline Task definitions. + // This prevents validation from failing in the reconciler when a Task is converted to a different API version. + // See https://github.com/tektoncd/pipeline/issues/6616 for more information. + // TODO(#6592): Decouple API versioning from feature versioning + errs = errs.Also(ts.TaskSpec.ValidateBetaFields(ctx).ViaField("taskSpec")) } errs = errs.Also(ValidateParameters(ctx, ts.Params).ViaField("params")) @@ -141,7 +144,8 @@ func (ts *TaskRunSpec) validateInlineParameters(ctx context.Context) (errs *apis } if ts.TaskSpec != nil && ts.TaskSpec.Steps != nil { errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) - errs = errs.Also(ValidateParameterVariables(config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), ts.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateParameterVariables(ctx, ts.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, ts.TaskSpec.Steps, paramSpec)) } return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_types.go index 7e0ef91473..58af7273d2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_types.go @@ -102,9 +102,9 @@ func (wes WhenExpressions) AllowsExecution() bool { return true } -// ReplaceWhenExpressionsVariables interpolates variables, such as Parameters and Results, in +// ReplaceVariables interpolates variables, such as Parameters and Results, in // the Input and Values. 
-func (wes WhenExpressions) ReplaceWhenExpressionsVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions { +func (wes WhenExpressions) ReplaceVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions { replaced := wes for i := range wes { replaced[i] = wes[i].applyReplacements(replacements, arrayReplacements) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go index 4be7c58582..57c60467f3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go @@ -19,8 +19,6 @@ package v1 import ( "context" - "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/version" "k8s.io/apimachinery/pkg/api/equality" "knative.dev/pkg/apis" ) @@ -68,25 +66,15 @@ func (b *WorkspaceBinding) Validate(ctx context.Context) (errs *apis.FieldError) return apis.ErrMissingField("secret.secretName") } - // The projected workspace is only supported when the beta feature gate is enabled. // For a Projected volume to work, you must provide at least one source. if b.Projected != nil && len(b.Projected.Sources) == 0 { - errs := version.ValidateEnabledAPIFields(ctx, "projected workspace type", config.BetaAPIFields).ViaField("workspaces") - if errs != nil { - return errs - } if len(b.Projected.Sources) == 0 { return apis.ErrMissingField("projected.sources") } } - // The csi workspace is only supported when the beta feature gate is enabled. // For a CSI to work, you must provide and have installed the driver to use. if b.CSI != nil { - errs := version.ValidateEnabledAPIFields(ctx, "csi workspace type", config.BetaAPIFields).ViaField("workspaces") - if errs != nil { - return errs - } if b.CSI.Driver == "" { return apis.ErrMissingField("csi.driver") } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go index 68198cf44f..b32dd8ad2a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go @@ -230,7 +230,7 @@ func (r *Run) HasStarted() bool { return r.Status.StartTime != nil && !r.Status.StartTime.IsZero() } -// IsSuccessful returns true if the Run's status indicates that it is done. +// IsSuccessful returns true if the Run's status indicates that it has succeeded. 
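// Editor's illustrative sketch, not part of this diff: after the workspace_validation.go
// change above, csi and projected workspace bindings are validated without consulting the
// enable-api-fields feature gate; only their required sub-fields are checked.
// The package name and function below are hypothetical usage, for illustration only.
package example

import (
	"context"
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
	corev1 "k8s.io/api/core/v1"
)

func checkCSIWorkspaceBinding(ctx context.Context) {
	binding := &v1.WorkspaceBinding{
		Name: "data",
		CSI:  &corev1.CSIVolumeSource{}, // Driver intentionally left empty
	}
	// Expected to fail with a missing-field error for "csi.driver",
	// independent of any feature-gate setting.
	fmt.Println(binding.Validate(ctx))
}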
func (r *Run) IsSuccessful() bool { return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go index 342f3717be..5dfa6278c4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go @@ -19,7 +19,6 @@ package v1beta1 import ( "context" - "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" "knative.dev/pkg/apis" ) @@ -32,6 +31,8 @@ func (t *ClusterTask) Validate(ctx context.Context) *apis.FieldError { return nil } errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // We do not support propagated parameters in ClusterTasks. + // Validate that all params the ClusterTask uses are declared. + return errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.Spec.Steps, t.Spec.Params)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go index 816e4e9918..5bf1365fc7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_conversion.go @@ -47,10 +47,6 @@ func (s Step) convertTo(ctx context.Context, sink *v1.Step) { sink.OnError = (v1.OnErrorType)(s.OnError) sink.StdoutConfig = (*v1.StepOutputConfig)(s.StdoutConfig) sink.StderrConfig = (*v1.StepOutputConfig)(s.StderrConfig) - - // TODO(#4546): Handle deprecated fields - // Ports, LivenessProbe, ReadinessProbe, StartupProbe, Lifecycle, TerminationMessagePath - // TerminationMessagePolicy, Stdin, StdinOnce, TTY } func (s *Step) convertFrom(ctx context.Context, source v1.Step) { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go index 233270037d..e74f48342b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go @@ -223,11 +223,16 @@ func (r *CustomRun) HasStarted() bool { return r.Status.StartTime != nil && !r.Status.StartTime.IsZero() } -// IsSuccessful returns true if the CustomRun's status indicates that it is done. +// IsSuccessful returns true if the CustomRun's status indicates that it has succeeded. func (r *CustomRun) IsSuccessful() bool { return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } +// IsFailure returns true if the CustomRun's status indicates that it has failed. 
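// Editor's illustrative sketch, not part of this diff: TaskRun and CustomRun now expose
// symmetric IsSuccessful/IsFailure helpers (added above and in taskrun_types.go earlier
// in this diff), so callers can distinguish "failed" from "not finished yet" without
// inspecting the Succeeded condition directly. Package and function names are hypothetical.
package example

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)

func reportCustomRun(r *v1beta1.CustomRun) {
	switch {
	case r.IsSuccessful():
		fmt.Println("custom run succeeded")
	case r.IsFailure():
		fmt.Println("custom run failed")
	default:
		fmt.Println("custom run is still executing or has not started")
	}
}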
+func (r *CustomRun) IsFailure() bool { + return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsFalse() +} + // GetCustomRunKey return the customrun's key for timeout handler map func (r *CustomRun) GetCustomRunKey() string { // The address of the pointer is a threadsafe unique identifier for the customrun diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go index f1c86d4e06..19042e11e4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/matrix_types.go @@ -300,29 +300,17 @@ func (m *Matrix) validateCombinationsCount(ctx context.Context) (errs *apis.Fiel return errs } -// validateParams validates the type of Parameter for Matrix.Params and Matrix.Include.Params -// Matrix.Params must be of type array. Matrix.Include.Params must be of type string. -// validateParams also validates Matrix.Params for a unique list of params +// validateUniqueParams validates Matrix.Params for a unique list of params // and a unique list of params in each Matrix.Include.Params specification -func (m *Matrix) validateParams() (errs *apis.FieldError) { +func (m *Matrix) validateUniqueParams() (errs *apis.FieldError) { if m != nil { if m.HasInclude() { for i, include := range m.Include { errs = errs.Also(include.Params.validateDuplicateParameters().ViaField(fmt.Sprintf("matrix.include[%d].params", i))) - for _, param := range include.Params { - if param.Value.Type != ParamTypeString { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type string only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.include.params", param.Name)) - } - } } } if m.HasParams() { errs = errs.Also(m.Params.validateDuplicateParameters().ViaField("matrix.params")) - for _, param := range m.Params { - if param.Value.Type != ParamTypeArray { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("parameters of type array only are allowed, but got param type %s", string(param.Value.Type)), "").ViaFieldKey("matrix.params", param.Name)) - } - } } } return errs diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go index df879cd588..ce92c8d8ce 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go @@ -1404,7 +1404,7 @@ func schema_pkg_apis_pipeline_v1beta1_Pipeline(ref common.ReferenceCallback) com return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.", + Description: "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.\n\nDeprecated: Please use v1.Pipeline instead.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -1673,7 +1673,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref common.ReferenceCallback) return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineRun represents a single execution of a Pipeline. 
PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.", + Description: "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.\n\nDeprecated: Please use v1.PipelineRun instead.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -2632,7 +2632,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref common.ReferenceCallback) }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Time after which the TaskRun times out. Defaults to 1 hour. Specified TaskRun timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Time after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -4206,7 +4206,7 @@ func schema_pkg_apis_pipeline_v1beta1_Task(ref common.ReferenceCallback) common. return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.\n\nDeprecated: Please use v1.Task instead.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -4549,7 +4549,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRun(ref common.ReferenceCallback) comm return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.", + Description: "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.\n\nDeprecated: Please use v1.TaskRun instead.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -4956,7 +4956,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref common.ReferenceCallback) }, "timeout": { SchemaProps: spec.SchemaProps{ - Description: "Time after which one retry attempt times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Description: "Time after which one retry attempt times out. Defaults to 1 hour. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, @@ -6012,7 +6012,7 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestSpec(ref common.Referen Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"), }, }, }, @@ -6022,7 +6022,7 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestSpec(ref common.Referen }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"}, } } @@ -6087,13 +6087,13 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatus(ref common.Refer "source": { SchemaProps: spec.SchemaProps{ Description: "Deprecated: Use RefSource instead", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"), }, }, "refSource": { SchemaProps: spec.SchemaProps{ Description: "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"), }, }, }, @@ -6101,7 +6101,7 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatus(ref common.Refer }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource", "knative.dev/pkg/apis.Condition"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource", "knative.dev/pkg/apis.Condition"}, } } @@ -6123,13 +6123,13 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatusFields(ref common "source": { SchemaProps: spec.SchemaProps{ Description: "Deprecated: Use RefSource instead", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"), }, }, "refSource": { SchemaProps: spec.SchemaProps{ Description: "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"), }, }, }, @@ -6137,6 +6137,6 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestStatusFields(ref common }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ConfigSource", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.RefSource"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.RefSource"}, } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go index 18de6bd71d..5ac4f50a86 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_conversion.go @@ -77,7 +77,8 @@ func (p Param) convertTo(ctx context.Context, sink *v1.Param) { sink.Value = newValue } -func (p *Param) convertFrom(ctx context.Context, source v1.Param) { +// ConvertFrom converts v1beta1 Param from v1 Param +func (p 
*Param) ConvertFrom(ctx context.Context, source v1.Param) { p.Name = source.Name newValue := ParamValue{} newValue.convertFrom(ctx, source.Value) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go index a2d7e78f3d..c703fcd5a4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go @@ -94,6 +94,51 @@ func (pp *ParamSpec) SetDefaults(context.Context) { } } +// getNames returns all the names of the declared parameters +func (ps ParamSpecs) getNames() []string { + var names []string + for _, p := range ps { + names = append(names, p.Name) + } + return names +} + +// sortByType splits the input params into string params, array params, and object params, in that order +func (ps ParamSpecs) sortByType() (ParamSpecs, ParamSpecs, ParamSpecs) { + var stringParams, arrayParams, objectParams ParamSpecs + for _, p := range ps { + switch p.Type { + case ParamTypeArray: + arrayParams = append(arrayParams, p) + case ParamTypeObject: + objectParams = append(objectParams, p) + case ParamTypeString: + fallthrough + default: + stringParams = append(stringParams, p) + } + } + return stringParams, arrayParams, objectParams +} + +// validateNoDuplicateNames returns an error if any of the params have the same name +func (ps ParamSpecs) validateNoDuplicateNames() *apis.FieldError { + names := ps.getNames() + seen := sets.String{} + dups := sets.String{} + var errs *apis.FieldError + for _, n := range names { + if seen.Has(n) { + dups.Insert(n) + } + seen.Insert(n) + } + for n := range dups { + errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", n)) + } + return errs +} + // setDefaultsForProperties sets default type for PropertySpec (string) if it's not specified func (pp *ParamSpec) setDefaultsForProperties() { for key, propertySpec := range pp.Properties { @@ -143,9 +188,9 @@ func (ps Params) extractParamMapArrVals() map[string][]string { return paramsMap } -// extractParamArrayLengths extract and return the lengths of all array params +// ExtractParamArrayLengths extract and return the lengths of all array params // Example of returned value: {"a-array-params": 2,"b-array-params": 2 } -func (ps Params) extractParamArrayLengths() map[string]int { +func (ps Params) ExtractParamArrayLengths() map[string]int { // Collect all array params arrayParamsLengths := make(map[string]int) @@ -171,9 +216,18 @@ func (ps Params) validateDuplicateParameters() (errs *apis.FieldError) { return errs } -// extractParamArrayLengths extract and return the lengths of all array params +// ReplaceVariables applies string, array and object replacements to variables in Params +func (ps Params) ReplaceVariables(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) Params { + params := ps.DeepCopy() + for i := range params { + params[i].Value.ApplyReplacements(stringReplacements, arrayReplacements, objectReplacements) + } + return params +} + +// ExtractDefaultParamArrayLengths extract and return the lengths of all array param defaults // Example of returned value: {"a-array-params": 2,"b-array-params": 2 } -func (ps ParamSpecs) extractParamArrayLengths() map[string]int { +func (ps ParamSpecs) ExtractDefaultParamArrayLengths() map[string]int { // Collect all array params arrayParamsLengths := 
make(map[string]int) @@ -188,30 +242,6 @@ func (ps ParamSpecs) extractParamArrayLengths() map[string]int { return arrayParamsLengths } -// validateOutofBoundArrayParams validates if the array indexing params are out of bound -// example of arrayIndexingParams: ["$(params.a-array-param[1])", "$(params.b-array-param[2])"] -// example of arrayParamsLengths: {"a-array-params": 2,"b-array-params": 2 } -func validateOutofBoundArrayParams(arrayIndexingParams []string, arrayParamsLengths map[string]int) error { - outofBoundParams := sets.String{} - for _, val := range arrayIndexingParams { - indexString := substitution.ExtractIndexString(val) - idx, _ := substitution.ExtractIndex(indexString) - // this will extract the param name from reference - // e.g. $(params.a-array-param[1]) -> a-array-param - paramName, _, _ := substitution.ExtractVariablesFromString(substitution.TrimArrayIndex(val), "params") - - if paramLength, ok := arrayParamsLengths[paramName[0]]; ok { - if idx >= paramLength { - outofBoundParams.Insert(val) - } - } - } - if outofBoundParams.Len() > 0 { - return fmt.Errorf("non-existent param references:%v", outofBoundParams.List()) - } - return nil -} - // extractArrayIndexingParamRefs takes a string of the form `foo-$(params.array-param[1])-bar` and extracts the portions of the string that reference an element in an array param. // For example, for the string “foo-$(params.array-param[1])-bar-$(params.other-array-param[2])-$(params.string-param)`, // it would return ["$(params.array-param[1])", "$(params.other-array-param[2])"]. @@ -569,23 +599,23 @@ func validateParamStringValue(param Param, prefix string, paramNames sets.String // validateStringVariable validates the normal string fields that can only accept references to string param or individual keys of object param func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { - errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars) errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) - return errs.Also(substitution.ValidateVariableProhibitedP(value, prefix, arrayVars)) + return errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(value, prefix, arrayVars)) } func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { - errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs := substitution.ValidateNoReferencesToUnknownVariables(value, prefix, stringVars) errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) - return errs.Also(substitution.ValidateVariableIsolatedP(value, prefix, arrayVars)) + return errs.Also(substitution.ValidateVariableReferenceIsIsolated(value, prefix, arrayVars)) } func validateObjectVariable(value, prefix string, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { objectNames := sets.NewString() for objectParamName, keys := range objectParamNameKeys { objectNames.Insert(objectParamName) - errs = errs.Also(substitution.ValidateVariableP(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) } - return 
errs.Also(substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames)) + return errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(value, prefix, objectNames)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go index 0c66a3bd13..d6b4c770ee 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go @@ -20,6 +20,8 @@ import ( "context" "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "knative.dev/pkg/apis" ) @@ -34,20 +36,20 @@ func (p *Pipeline) ConvertTo(ctx context.Context, to apis.Convertible) error { switch sink := to.(type) { case *v1.Pipeline: sink.ObjectMeta = p.ObjectMeta - return p.Spec.ConvertTo(ctx, &sink.Spec) + return p.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertTo implements apis.Convertible -func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1.PipelineSpec) error { +func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1.PipelineSpec, meta *metav1.ObjectMeta) error { sink.DisplayName = ps.DisplayName sink.Description = ps.Description sink.Tasks = nil for _, t := range ps.Tasks { new := v1.PipelineTask{} - err := t.convertTo(ctx, &new) + err := t.convertTo(ctx, &new, meta) if err != nil { return err } @@ -74,7 +76,7 @@ func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1.PipelineSpec) er sink.Finally = nil for _, f := range ps.Finally { new := v1.PipelineTask{} - err := f.convertTo(ctx, &new) + err := f.convertTo(ctx, &new, meta) if err != nil { return err } @@ -88,20 +90,20 @@ func (p *Pipeline) ConvertFrom(ctx context.Context, from apis.Convertible) error switch source := from.(type) { case *v1.Pipeline: p.ObjectMeta = source.ObjectMeta - return p.Spec.ConvertFrom(ctx, &source.Spec) + return p.Spec.ConvertFrom(ctx, &source.Spec, &p.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", p) } } // ConvertFrom implements apis.Convertible -func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec) error { +func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec, meta *metav1.ObjectMeta) error { ps.DisplayName = source.DisplayName ps.Description = source.Description ps.Tasks = nil for _, t := range source.Tasks { new := PipelineTask{} - err := new.convertFrom(ctx, t) + err := new.convertFrom(ctx, t, meta) if err != nil { return err } @@ -128,7 +130,7 @@ func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec ps.Finally = nil for _, f := range source.Finally { new := PipelineTask{} - err := new.convertFrom(ctx, f) + err := new.convertFrom(ctx, f, meta) if err != nil { return err } @@ -137,7 +139,7 @@ func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source *v1.PipelineSpec return nil } -func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask) error { +func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask, meta *metav1.ObjectMeta) error { sink.Name = pt.Name sink.DisplayName = pt.DisplayName sink.Description = pt.Description @@ -147,7 +149,7 @@ func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask) err } if pt.TaskSpec != nil { sink.TaskSpec = &v1.EmbeddedTask{} 
- err := pt.TaskSpec.convertTo(ctx, sink.TaskSpec) + err := pt.TaskSpec.convertTo(ctx, sink.TaskSpec, meta, pt.Name) if err != nil { return err } @@ -183,18 +185,18 @@ func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask) err return nil } -func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask) error { +func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask, meta *metav1.ObjectMeta) error { pt.Name = source.Name pt.DisplayName = source.DisplayName pt.Description = source.Description if source.TaskRef != nil { newTaskRef := TaskRef{} - newTaskRef.convertFrom(ctx, *source.TaskRef) + newTaskRef.ConvertFrom(ctx, *source.TaskRef) pt.TaskRef = &newTaskRef } if source.TaskSpec != nil { newTaskSpec := EmbeddedTask{} - err := newTaskSpec.convertFrom(ctx, *source.TaskSpec) + err := newTaskSpec.convertFrom(ctx, *source.TaskSpec, meta, pt.Name) pt.TaskSpec = &newTaskSpec if err != nil { return err @@ -211,7 +213,7 @@ func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask) pt.Params = nil for _, p := range source.Params { new := Param{} - new.convertFrom(ctx, p) + new.ConvertFrom(ctx, p) pt.Params = append(pt.Params, new) } pt.Matrix = nil @@ -231,20 +233,20 @@ func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask) return nil } -func (et EmbeddedTask) convertTo(ctx context.Context, sink *v1.EmbeddedTask) error { +func (et EmbeddedTask) convertTo(ctx context.Context, sink *v1.EmbeddedTask, meta *metav1.ObjectMeta, taskName string) error { sink.TypeMeta = et.TypeMeta sink.Spec = et.Spec sink.Metadata = v1.PipelineTaskMetadata(et.Metadata) sink.TaskSpec = v1.TaskSpec{} - return et.TaskSpec.ConvertTo(ctx, &sink.TaskSpec) + return et.TaskSpec.ConvertTo(ctx, &sink.TaskSpec, meta, taskName) } -func (et *EmbeddedTask) convertFrom(ctx context.Context, source v1.EmbeddedTask) error { +func (et *EmbeddedTask) convertFrom(ctx context.Context, source v1.EmbeddedTask, meta *metav1.ObjectMeta, taskName string) error { et.TypeMeta = source.TypeMeta et.Spec = source.Spec et.Metadata = PipelineTaskMetadata(source.Metadata) et.TaskSpec = TaskSpec{} - return et.TaskSpec.ConvertFrom(ctx, &source.TaskSpec) + return et.TaskSpec.ConvertFrom(ctx, &source.TaskSpec, meta, taskName) } func (we WhenExpression) convertTo(ctx context.Context, sink *v1.WhenExpression) { @@ -278,7 +280,7 @@ func (m *Matrix) convertTo(ctx context.Context, sink *v1.Matrix) { func (m *Matrix) convertFrom(ctx context.Context, source v1.Matrix) { for _, param := range source.Params { new := Param{} - new.convertFrom(ctx, param) + new.ConvertFrom(ctx, param) m.Params = append(m.Params, new) } @@ -286,7 +288,7 @@ func (m *Matrix) convertFrom(ctx context.Context, source v1.Matrix) { m.Include = append(m.Include, IncludeParams{Name: include.Name}) for _, p := range include.Params { new := Param{} - new.convertFrom(ctx, p) + new.ConvertFrom(ctx, p) m.Include[i].Params = append(m.Include[i].Params, new) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go index efb6607dfc..4426d5ddb7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go @@ -39,10 +39,12 @@ const ( // +genclient:noStatus // +genreconciler:krshapedlogic=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 
+// +k8s:openapi-gen=true // Pipeline describes a list of Tasks to execute. It expresses how outputs // of tasks feed into inputs of subsequent tasks. -// +k8s:openapi-gen=true +// +// Deprecated: Please use v1.Pipeline instead. type Pipeline struct { metav1.TypeMeta `json:",inline"` // +optional @@ -215,7 +217,6 @@ type PipelineTask struct { Workspaces []WorkspacePipelineTaskBinding `json:"workspaces,omitempty"` // Time after which the TaskRun times out. Defaults to 1 hour. - // Specified TaskRun timeout should be less than 24h. // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go index c170e85a3b..6411d3f09d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go @@ -47,8 +47,12 @@ func (p *Pipeline) SupportedVerbs() []admissionregistrationv1.OperationType { // that any references resources exist, that is done at run time. func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { errs := validate.ObjectMetadata(p.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // When a Pipeline is created directly, instead of declared inline in a PipelineRun, + // we do not support propagated parameters and workspaces. + // Validate that all params and workspaces it uses are declared. + errs = errs.Also(p.Spec.validatePipelineParameterUsage(ctx).ViaField("spec")) + return errs.Also(p.Spec.validatePipelineWorkspacesUsage().ViaField("spec")) } // Validate checks that taskNames in the Pipeline are valid and that the graph @@ -72,8 +76,6 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally)) // Validate the pipeline's workspaces. errs = errs.Also(validatePipelineWorkspacesDeclarations(ps.Workspaces)) - errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Tasks).ViaField("tasks")) - errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Finally).ViaField("finally")) // Validate the pipeline's results errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks, ps.Finally)) errs = errs.Also(validateTasksAndFinallySection(ps)) @@ -111,6 +113,16 @@ func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, p return errs } +// validateUsageOfDeclaredPipelineTaskParameters validates that all parameters referenced in the pipeline Task are declared by the pipeline Task. 
+func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, path string) (errs *apis.FieldError) { + for i, t := range l { + if t.TaskSpec != nil { + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.TaskSpec.Steps, t.TaskSpec.Params).ViaFieldIndex(path, i)) + } + } + return errs +} + // ValidateName checks whether the PipelineTask's name is a valid DNS label func (pt PipelineTask) ValidateName() *apis.FieldError { if err := validation.IsDNS1123Label(pt.Name); len(err) > 0 { @@ -134,12 +146,22 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { if pt.Resources != nil { errs = errs.Also(apis.ErrDisallowedFields("resources")) } - + // taskKinds contains the kinds when the apiVersion is not set, they are not custom tasks, + // if apiVersion is set they are custom tasks. + taskKinds := map[TaskKind]bool{ + "": true, + NamespacedTaskKind: true, + ClusterTaskKind: true, + } cfg := config.FromContextOrDefaults(ctx) // Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task switch { + case pt.TaskRef != nil && !taskKinds[pt.TaskRef.Kind]: + errs = errs.Also(pt.validateCustomTask()) case pt.TaskRef != nil && pt.TaskRef.APIVersion != "": errs = errs.Also(pt.validateCustomTask()) + case pt.TaskSpec != nil && !taskKinds[TaskKind(pt.TaskSpec.Kind)]: + errs = errs.Also(pt.validateCustomTask()) case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "": errs = errs.Also(pt.validateCustomTask()) // If EnableTektonOCIBundles feature flag is on, validate bundle specifications @@ -148,7 +170,7 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { default: errs = errs.Also(pt.validateTask(ctx)) } - return + return //nolint:nakedret } func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) { @@ -157,9 +179,9 @@ func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldErr // when the enable-api-fields feature gate is anything but "alpha". 
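The new taskKinds map changes how a PipelineTask is classified: any taskRef (or embedded taskSpec) whose kind is not empty, Task, or ClusterTask is now routed to the custom-task validation even when no apiVersion is set. A hedged sketch of the difference follows; the apiVersion, kind, and names are made up, and the exact error text comes from validateCustomTask, which sits outside this hunk.

```go
// Illustrative only: how the taskKinds classification affects PipelineTask.Validate.
package main

import (
	"context"
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)

func main() {
	ctx := context.Background()

	// A non built-in kind plus an apiVersion: validated as a custom task reference.
	custom := v1beta1.PipelineTask{
		Name:    "wait",
		TaskRef: &v1beta1.TaskRef{APIVersion: "example.dev/v1", Kind: "Example", Name: "wait"},
	}
	fmt.Println(custom.Validate(ctx)) // expected to print <nil>

	// A non built-in kind with no apiVersion used to fall through to the regular
	// taskRef validation; with the taskKinds check it now hits the custom-task
	// branch, which rejects the missing apiVersion.
	missingAPIVersion := v1beta1.PipelineTask{
		Name:    "wait",
		TaskRef: &v1beta1.TaskRef{Kind: "Example", Name: "wait"},
	}
	fmt.Println(missingAPIVersion.Validate(ctx)) // expected: field error on taskRef.apiVersion
}
```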
errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) errs = errs.Also(pt.Matrix.validateCombinationsCount(ctx)) + errs = errs.Also(pt.Matrix.validateUniqueParams()) } errs = errs.Also(pt.Matrix.validateParameterInOneOfMatrixOrParams(pt.Params)) - errs = errs.Also(pt.Matrix.validateParams()) return errs } @@ -266,24 +288,11 @@ func (pt PipelineTask) validateBundle() (errs *apis.FieldError) { // validateTask validates a pipeline task or a final task for taskRef and taskSpec func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) - // Validate TaskSpec if it's present if pt.TaskSpec != nil { errs = errs.Also(pt.TaskSpec.Validate(ctx).ViaField("taskSpec")) } if pt.TaskRef != nil { - if pt.TaskRef.Name != "" { - // TaskRef name must be a valid k8s name - if errSlice := validation.IsQualifiedName(pt.TaskRef.Name); len(errSlice) != 0 { - errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "taskRef.name")) - } - } else if pt.TaskRef.Resolver == "" { - errs = errs.Also(apis.ErrInvalidValue("taskRef must specify name", "taskRef.name")) - } - // fail if bundle is present when EnableTektonOCIBundles feature flag is off (as it won't be allowed nor used) - if !cfg.FeatureFlags.EnableTektonOCIBundles && pt.TaskRef.Bundle != "" { - errs = errs.Also(apis.ErrDisallowedFields("taskRef.bundle")) - } + errs = errs.Also(pt.TaskRef.Validate(ctx).ViaField("taskRef")) } return errs } @@ -307,12 +316,43 @@ func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) return errs } -// validatePipelineWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in -// the pipeline -func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { - if !config.ValidateParameterVariablesAndWorkspaces(ctx) { - return nil +// validatePipelineParameterUsage validates that parameters referenced in the Pipeline are declared by the Pipeline +func (ps *PipelineSpec) validatePipelineParameterUsage(ctx context.Context) (errs *apis.FieldError) { + errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, "tasks")) + errs = errs.Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, "finally")) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.Tasks, ps.Params).ViaField("tasks")) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.Finally, ps.Params).ViaField("finally")) + return errs +} + +// validatePipelineTaskParameterUsage validates that parameters referenced in the Pipeline Tasks are declared by the Pipeline +func validatePipelineTaskParameterUsage(tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) { + allParamNames := sets.NewString(params.getNames()...) + _, arrayParams, objectParams := params.sortByType() + arrayParamNames := sets.NewString(arrayParams.getNames()...) 
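Because a standalone Pipeline no longer skips parameter-usage validation, a task that references an undeclared pipeline parameter is now rejected when the Pipeline itself is validated rather than later at run time. A sketch of that behavior, assuming the usual ParamValue/ParamTypeString shapes; the parameter and task names are invented for illustration.

```go
// Illustrative only: an undeclared "$(params.not-declared)" reference fails Pipeline.Validate.
package main

import (
	"context"
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	p := &v1beta1.Pipeline{
		ObjectMeta: metav1.ObjectMeta{Name: "params-demo"}, // illustrative name
		Spec: v1beta1.PipelineSpec{
			Params: []v1beta1.ParamSpec{{Name: "app-name", Type: v1beta1.ParamTypeString}},
			Tasks: []v1beta1.PipelineTask{{
				Name:    "build",
				TaskRef: &v1beta1.TaskRef{Name: "build-task"},
				Params: []v1beta1.Param{{
					Name:  "tag",
					Value: v1beta1.ParamValue{Type: v1beta1.ParamTypeString, StringVal: "$(params.not-declared)"},
				}},
			}},
		},
	}

	// The error is reported under spec.tasks[0].params via validatePipelineParameterUsage.
	if err := p.Validate(context.Background()); err != nil {
		fmt.Println(err.Error())
	}
}
```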
+ objectParameterNameKeys := map[string][]string{} + for _, p := range objectParams { + for k := range p.Properties { + objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) + } } + errs = errs.Also(validatePipelineParametersVariables(tasks, "params", allParamNames, arrayParamNames, objectParameterNameKeys)) + for i, task := range tasks { + errs = errs.Also(task.Params.validateDuplicateParameters().ViaFieldIndex("params", i)) + } + return errs +} + +// validatePipelineWorkspacesUsage validates that Workspaces referenced in the Pipeline are declared by the Pipeline +func (ps *PipelineSpec) validatePipelineWorkspacesUsage() (errs *apis.FieldError) { + errs = errs.Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) + errs = errs.Also(validatePipelineTasksWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) + return errs +} + +// validatePipelineTasksWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in +// the pipeline +func validatePipelineTasksWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { workspaceNames := sets.NewString() for _, ws := range wss { workspaceNames.Insert(ws.Name) @@ -326,33 +366,13 @@ func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspac // ValidatePipelineParameterVariables validates parameters with those specified by each pipeline task, // (1) it validates the type of parameter is either string or array (2) parameter default value matches -// with the type of that param (3) ensures that the referenced param variable is defined is part of the param declarations -func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { - parameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - objectParameterNameKeys := map[string][]string{} - +// with the type of that param +func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params ParamSpecs) (errs *apis.FieldError) { // validates all the types within a slice of ParamSpecs errs = errs.Also(ValidateParameterTypes(ctx, params).ViaField("params")) - - for _, p := range params { - if parameterNames.Has(p.Name) { - errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) - } - // Add parameter name to parameterNames, and to arrayParameterNames if type is array. 
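The companion workspace check works the same way: every workspace a pipeline task binds must be declared under spec.workspaces. A sketch under the assumption that WorkspacePipelineTaskBinding keeps its usual Name/Workspace fields; all names are illustrative.

```go
// Illustrative only: a pipeline task binding an undeclared workspace fails Pipeline.Validate.
package main

import (
	"context"
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	p := &v1beta1.Pipeline{
		ObjectMeta: metav1.ObjectMeta{Name: "workspaces-demo"}, // illustrative name
		Spec: v1beta1.PipelineSpec{
			Workspaces: []v1beta1.PipelineWorkspaceDeclaration{{Name: "source"}},
			Tasks: []v1beta1.PipelineTask{{
				Name:    "build",
				TaskRef: &v1beta1.TaskRef{Name: "build-task"},
				Workspaces: []v1beta1.WorkspacePipelineTaskBinding{
					{Name: "input", Workspace: "source"},  // declared above: accepted
					{Name: "cache", Workspace: "scratch"}, // never declared: rejected
				},
			}},
		},
	}

	// The error surfaces under spec.tasks via validatePipelineTasksWorkspacesUsage.
	if err := p.Validate(context.Background()); err != nil {
		fmt.Println(err.Error())
	}
}
```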
- parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - - if p.Type == ParamTypeObject { - for k := range p.Properties { - objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) - } - } - } - if config.ValidateParameterVariablesAndWorkspaces(ctx) { - errs = errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, objectParameterNameKeys)) + errs = errs.Also(params.validateNoDuplicateNames()) + for i, task := range tasks { + errs = errs.Also(task.Params.validateDuplicateParameters().ViaField("params").ViaIndex(i)) } return errs } @@ -516,7 +536,7 @@ func validateExecutionStatusVariablesExpressions(expressions []string, ptNames s func validatePipelineContextVariablesInParamValues(paramValues []string, prefix string, contextNames sets.String) (errs *apis.FieldError) { for _, paramValue := range paramValues { - errs = errs.Also(substitution.ValidateVariableP(paramValue, prefix, contextNames).ViaField("value")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(paramValue, prefix, contextNames).ViaField("value")) } return errs } @@ -699,20 +719,11 @@ func validateResultsFromMatrixedPipelineTasksNotConsumed(tasks []PipelineTask, f return errs } -// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. -// error is returned when the array indexing reference is out of bound of the array param -// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. -func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { - if !config.CheckAlphaOrBetaAPIFields(ctx) { - return nil - } - - // Collect all array params lengths - arrayParamsLengths := ps.Params.extractParamArrayLengths() - for k, v := range params.extractParamArrayLengths() { - arrayParamsLengths[k] = v - } - +// GetIndexingReferencesToArrayParams returns all strings referencing indices of PipelineRun array parameters +// from parameters, workspaces, and when expressions defined in the Pipeline's Tasks and Finally Tasks. +// For example, if a Task in the Pipeline has a parameter with a value "$(params.array-param-name[1])", +// this would be one of the strings returned. +func (ps *PipelineSpec) GetIndexingReferencesToArrayParams() sets.String { paramsRefs := []string{} for i := range ps.Tasks { paramsRefs = append(paramsRefs, ps.Tasks[i].Params.extractValues()...) @@ -727,22 +738,20 @@ func (ps *PipelineSpec) ValidateParamArrayIndex(ctx context.Context, params Para paramsRefs = append(paramsRefs, wes.Values...) } } - for i := range ps.Finally { paramsRefs = append(paramsRefs, ps.Finally[i].Params.extractValues()...) if ps.Finally[i].IsMatrixed() { paramsRefs = append(paramsRefs, ps.Finally[i].Matrix.Params.extractValues()...) } for _, wes := range ps.Finally[i].WhenExpressions { + paramsRefs = append(paramsRefs, wes.Input) paramsRefs = append(paramsRefs, wes.Values...) } } - // extract all array indexing references, for example []{"$(params.array-params[1])"} arrayIndexParamRefs := []string{} for _, p := range paramsRefs { arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) } - - return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) + return sets.NewString(arrayIndexParamRefs...) 
} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go index 68a475233f..0b3f45274c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go @@ -20,6 +20,8 @@ import ( "context" "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "knative.dev/pkg/apis" ) @@ -34,24 +36,24 @@ func (pr *PipelineRun) ConvertTo(ctx context.Context, to apis.Convertible) error switch sink := to.(type) { case *v1.PipelineRun: sink.ObjectMeta = pr.ObjectMeta - if err := pr.Status.convertTo(ctx, &sink.Status); err != nil { + if err := pr.Status.convertTo(ctx, &sink.Status, &sink.ObjectMeta); err != nil { return err } - return pr.Spec.ConvertTo(ctx, &sink.Spec) + return pr.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertTo implements apis.Convertible -func (prs PipelineRunSpec) ConvertTo(ctx context.Context, sink *v1.PipelineRunSpec) error { +func (prs PipelineRunSpec) ConvertTo(ctx context.Context, sink *v1.PipelineRunSpec, meta *metav1.ObjectMeta) error { if prs.PipelineRef != nil { sink.PipelineRef = &v1.PipelineRef{} prs.PipelineRef.convertTo(ctx, sink.PipelineRef) } if prs.PipelineSpec != nil { sink.PipelineSpec = &v1.PipelineSpec{} - err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec) + err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec, meta) if err != nil { return err } @@ -94,17 +96,17 @@ func (pr *PipelineRun) ConvertFrom(ctx context.Context, from apis.Convertible) e switch source := from.(type) { case *v1.PipelineRun: pr.ObjectMeta = source.ObjectMeta - if err := pr.Status.convertFrom(ctx, &source.Status); err != nil { + if err := pr.Status.convertFrom(ctx, &source.Status, &pr.ObjectMeta); err != nil { return err } - return pr.Spec.ConvertFrom(ctx, &source.Spec) + return pr.Spec.ConvertFrom(ctx, &source.Spec, &pr.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", pr) } } // ConvertFrom implements apis.Convertible -func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.PipelineRunSpec) error { +func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.PipelineRunSpec, meta *metav1.ObjectMeta) error { if source.PipelineRef != nil { newPipelineRef := PipelineRef{} newPipelineRef.convertFrom(ctx, *source.PipelineRef) @@ -112,7 +114,7 @@ func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.Pipeline } if source.PipelineSpec != nil { newPipelineSpec := PipelineSpec{} - err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec) + err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec, meta) if err != nil { return err } @@ -121,7 +123,7 @@ func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.Pipeline prs.Params = nil for _, p := range source.Params { new := Param{} - new.convertFrom(ctx, p) + new.ConvertFrom(ctx, p) prs.Params = append(prs.Params, new) } prs.ServiceAccountName = source.TaskRunTemplate.ServiceAccountName @@ -135,7 +137,7 @@ func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1.Pipeline prs.Workspaces = nil for _, w := range source.Workspaces { new := WorkspaceBinding{} - new.convertFrom(ctx, w) + new.ConvertFrom(ctx, w) prs.Workspaces = append(prs.Workspaces, new) } 
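Several single-object converters, such as Param.ConvertFrom and WorkspaceBinding.ConvertFrom, are now exported and reused throughout these spec converters. A tiny sketch of converting one v1 parameter directly; the parameter name and value are illustrative, and the ParamValue shape is assumed to be the usual Type/StringVal pair.

```go
// Illustrative only: the now-exported Param.ConvertFrom mirrors the calls in the hunks above.
package main

import (
	"context"
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)

func main() {
	in := v1.Param{
		Name:  "greeting", // illustrative name
		Value: v1.ParamValue{Type: v1.ParamTypeString, StringVal: "hello"},
	}

	out := v1beta1.Param{}
	out.ConvertFrom(context.Background(), in)
	fmt.Println(out.Name, out.Value.StringVal) // greeting hello
}
```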
prs.TaskRunSpecs = nil @@ -206,7 +208,7 @@ func (ptrs *PipelineTaskRunSpec) convertFrom(ctx context.Context, source v1.Pipe ptrs.ComputeResources = source.ComputeResources } -func (prs *PipelineRunStatus) convertTo(ctx context.Context, sink *v1.PipelineRunStatus) error { +func (prs *PipelineRunStatus) convertTo(ctx context.Context, sink *v1.PipelineRunStatus, meta *metav1.ObjectMeta) error { sink.Status = prs.Status sink.StartTime = prs.StartTime sink.CompletionTime = prs.CompletionTime @@ -218,7 +220,7 @@ func (prs *PipelineRunStatus) convertTo(ctx context.Context, sink *v1.PipelineRu } if prs.PipelineSpec != nil { sink.PipelineSpec = &v1.PipelineSpec{} - err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec) + err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec, meta) if err != nil { return err } @@ -244,7 +246,7 @@ func (prs *PipelineRunStatus) convertTo(ctx context.Context, sink *v1.PipelineRu return nil } -func (prs *PipelineRunStatus) convertFrom(ctx context.Context, source *v1.PipelineRunStatus) error { +func (prs *PipelineRunStatus) convertFrom(ctx context.Context, source *v1.PipelineRunStatus, meta *metav1.ObjectMeta) error { prs.Status = source.Status prs.StartTime = source.StartTime prs.CompletionTime = source.CompletionTime @@ -256,7 +258,7 @@ func (prs *PipelineRunStatus) convertFrom(ctx context.Context, source *v1.Pipeli } if source.PipelineSpec != nil { newPipelineSpec := PipelineSpec{} - err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec) + err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec, meta) if err != nil { return err } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go index 86592824a2..0b3efc8f12 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go @@ -18,19 +18,32 @@ package v1beta1 import ( "context" + "regexp" "time" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline" pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" + "knative.dev/pkg/kmap" ) -var _ apis.Defaultable = (*PipelineRun)(nil) +var ( + _ apis.Defaultable = (*PipelineRun)(nil) + filterReservedAnnotationRegexp = regexp.MustCompile(pipeline.TektonReservedAnnotationExpr) +) // SetDefaults implements apis.Defaultable func (pr *PipelineRun) SetDefaults(ctx context.Context) { pr.Spec.SetDefaults(ctx) + + // Silently filtering out Tekton Reserved annotations at creation + if apis.IsInCreate(ctx) { + pr.ObjectMeta.Annotations = kmap.Filter(pr.ObjectMeta.Annotations, func(s string) bool { + return filterReservedAnnotationRegexp.MatchString(s) + }) + } } // SetDefaults implements apis.Defaultable diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go index 53c1f738c0..4c7ae06456 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go @@ -38,6 +38,7 @@ import ( // +genclient // +genreconciler:krshapedlogic=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true // PipelineRun represents a single 
execution of a Pipeline. PipelineRuns are how // the graph of Tasks declared in a Pipeline are executed; they specify inputs @@ -45,7 +46,7 @@ import ( // Tasks execution such as service account and tolerations. Creating a // PipelineRun creates TaskRuns for Tasks in the referenced Pipeline. // -// +k8s:openapi-gen=true +// Deprecated: Please use v1.PipelineRun instead. type PipelineRun struct { metav1.TypeMeta `json:",inline"` // +optional diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go index ed7ac5cce1..307f7b8f2e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go @@ -71,7 +71,6 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) // Validate PipelineSpec if it's present if ps.PipelineSpec != nil { - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) errs = errs.Also(ps.PipelineSpec.Validate(ctx).ViaField("pipelineSpec")) } @@ -232,12 +231,12 @@ func (ps *PipelineRunSpec) validateInlineParameters(ctx context.Context) (errs * for _, pt := range ps.PipelineSpec.Tasks { if pt.TaskSpec != nil && pt.TaskSpec.Steps != nil { errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) - errs = errs.Also(ValidateParameterVariables( - config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), pt.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateParameterVariables(ctx, pt.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, pt.TaskSpec.Steps, paramSpec)) } } - errs = errs.Also(ValidatePipelineParameterVariables( - config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), ps.PipelineSpec.Tasks, paramSpec)) + errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.PipelineSpec.Tasks, paramSpec)) + errs = errs.Also(validatePipelineTaskParameterUsage(ps.PipelineSpec.Tasks, paramSpec)) } return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go index 3bbed85032..93c6b7d2aa 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go @@ -37,7 +37,7 @@ func (rr *ResolverRef) convertFrom(ctx context.Context, source v1.ResolverRef) { rr.Params = nil for _, r := range source.Params { new := Param{} - new.convertFrom(ctx, r) + new.ConvertFrom(ctx, r) rr.Params = append(rr.Params, new) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go index 5e0facad2a..c695de103c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_conversion.go @@ -26,20 +26,24 @@ func (r TaskResult) convertTo(ctx context.Context, sink *v1.TaskResult) { sink.Name = r.Name sink.Type = v1.ResultsType(r.Type) sink.Description = r.Description - properties := make(map[string]v1.PropertySpec) - for k, v := range r.Properties { - properties[k] = v1.PropertySpec{Type: v1.ParamType(v.Type)} + if r.Properties != nil { + properties := 
make(map[string]v1.PropertySpec) + for k, v := range r.Properties { + properties[k] = v1.PropertySpec{Type: v1.ParamType(v.Type)} + } + sink.Properties = properties } - sink.Properties = properties } func (r *TaskResult) convertFrom(ctx context.Context, source v1.TaskResult) { r.Name = source.Name r.Type = ResultsType(source.Type) r.Description = source.Description - properties := make(map[string]PropertySpec) - for k, v := range source.Properties { - properties[k] = PropertySpec{Type: ParamType(v.Type)} + if source.Properties != nil { + properties := make(map[string]PropertySpec) + for k, v := range source.Properties { + properties[k] = PropertySpec{Type: ParamType(v.Type)} + } + r.Properties = properties } - r.Properties = properties } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go index fe2fca41a6..ab9ee83c35 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go @@ -17,8 +17,6 @@ import ( "context" "fmt" - "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/version" "knative.dev/pkg/apis" ) @@ -29,14 +27,10 @@ func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { } switch { - // Object results is a beta feature - make sure the feature flag is set to "beta" case tr.Type == ResultsTypeObject: errs := validateObjectResult(tr) - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) return errs - // Array results is a beta feature - make sure the feature flag is set to "beta" case tr.Type == ResultsTypeArray: - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.BetaAPIFields)) return errs // Resources created before the result. Type was introduced may not have Type set // and should be considered valid diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json index c0b8e2770c..2bbb8926db 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json @@ -680,7 +680,7 @@ } }, "v1beta1.Pipeline": { - "description": "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.", + "description": "Pipeline describes a list of Tasks to execute. It expresses how outputs of tasks feed into inputs of subsequent tasks.\n\nDeprecated: Please use v1.Pipeline instead.", "type": "object", "properties": { "apiVersion": { @@ -834,7 +834,7 @@ } }, "v1beta1.PipelineRun": { - "description": "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.", + "description": "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. 
Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.\n\nDeprecated: Please use v1.PipelineRun instead.", "type": "object", "properties": { "apiVersion": { @@ -1314,7 +1314,7 @@ "$ref": "#/definitions/v1beta1.EmbeddedTask" }, "timeout": { - "description": "Time after which the TaskRun times out. Defaults to 1 hour. Specified TaskRun timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Time after which the TaskRun times out. Defaults to 1 hour. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", "$ref": "#/definitions/v1.Duration" }, "when": { @@ -1630,7 +1630,7 @@ "type": "array", "items": { "default": {}, - "$ref": "#/definitions/v1beta1.Param" + "$ref": "#/definitions/v1.Param" }, "x-kubernetes-list-type": "atomic" } @@ -1675,11 +1675,11 @@ }, "refSource": { "description": "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", - "$ref": "#/definitions/v1beta1.RefSource" + "$ref": "#/definitions/v1.RefSource" }, "source": { "description": "Deprecated: Use RefSource instead", - "$ref": "#/definitions/v1beta1.ConfigSource" + "$ref": "#/definitions/v1.RefSource" } } }, @@ -1699,11 +1699,11 @@ }, "refSource": { "description": "RefSource is the source reference of the remote data that records the url, digest and the entrypoint.", - "$ref": "#/definitions/v1beta1.RefSource" + "$ref": "#/definitions/v1.RefSource" }, "source": { "description": "Deprecated: Use RefSource instead", - "$ref": "#/definitions/v1beta1.ConfigSource" + "$ref": "#/definitions/v1.RefSource" } } }, @@ -2329,7 +2329,7 @@ } }, "v1beta1.Task": { - "description": "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + "description": "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.\n\nDeprecated: Please use v1.Task instead.", "type": "object", "properties": { "apiVersion": { @@ -2515,7 +2515,7 @@ } }, "v1beta1.TaskRun": { - "description": "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.", + "description": "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.\n\nDeprecated: Please use v1.TaskRun instead.", "type": "object", "properties": { "apiVersion": { @@ -2756,7 +2756,7 @@ "$ref": "#/definitions/v1beta1.TaskSpec" }, "timeout": { - "description": "Time after which one retry attempt times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "description": "Time after which one retry attempt times out. Defaults to 1 hour. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", "$ref": "#/definitions/v1.Duration" }, "workspaces": { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go index 19bbec3fde..694e72dda2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go @@ -18,12 +18,43 @@ package v1beta1 import ( "context" + "encoding/json" "fmt" + "github.com/tektoncd/pipeline/pkg/apis/version" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "knative.dev/pkg/apis" ) +// TaskDeprecationsAnnotationKey is the annotation key for all deprecated fields of (a) Task(s) that belong(s) to an object. +// For example: a v1beta1.Pipeline contains two tasks +// +// spec: +// +// tasks: +// - name: task-1 +// stepTemplate: +// name: deprecated-name-field # deprecated field +// - name: task-2 +// steps: +// - tty: true # deprecated field +// +// The annotation would be: +// +// "tekton.dev/v1beta1.task-deprecations": `{ +// "task1":{ +// "deprecatedStepTemplates":{ +// "name":"deprecated-name-field" +// }, +// }, +// "task-2":{ +// "deprecatedSteps":[{"tty":true}], +// }, +// }` +const TaskDeprecationsAnnotationKey = "tekton.dev/v1beta1.task-deprecations" + var _ apis.Convertible = (*Task)(nil) // ConvertTo implements apis.Convertible @@ -34,14 +65,17 @@ func (t *Task) ConvertTo(ctx context.Context, to apis.Convertible) error { switch sink := to.(type) { case *v1.Task: sink.ObjectMeta = t.ObjectMeta - return t.Spec.ConvertTo(ctx, &sink.Spec) + return t.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta, t.Name) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertTo implements apis.Convertible -func (ts *TaskSpec) ConvertTo(ctx context.Context, sink *v1.TaskSpec) error { +func (ts *TaskSpec) ConvertTo(ctx context.Context, sink *v1.TaskSpec, meta *metav1.ObjectMeta, taskName string) error { + if err := serializeTaskDeprecations(meta, ts, taskName); err != nil { + return err + } sink.Steps = nil for _, s := range ts.Steps { new := v1.Step{} @@ -91,14 +125,14 @@ func (t *Task) ConvertFrom(ctx context.Context, from apis.Convertible) error { switch source := from.(type) { case *v1.Task: t.ObjectMeta = source.ObjectMeta - return t.Spec.ConvertFrom(ctx, &source.Spec) + return t.Spec.ConvertFrom(ctx, &source.Spec, &t.ObjectMeta, t.Name) default: return fmt.Errorf("unknown version, got: %T", t) } } // ConvertFrom implements apis.Convertible -func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec) error { +func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec, meta *metav1.ObjectMeta, taskName string) error { ts.Steps = nil for _, s := range source.Steps { new := Step{} @@ -111,6 +145,9 @@ func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec) error new.convertFrom(ctx, source.StepTemplate) ts.StepTemplate = &new } + if err := deserializeTaskDeprecations(meta, ts, taskName); err != nil { + return err + } ts.Sidecars = nil for _, s := range source.Sidecars { new := Sidecar{} @@ -139,3 +176,140 @@ func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1.TaskSpec) error ts.Description = source.Description return nil } + +// taskDeprecation contains deprecated fields of a Task +// +k8s:openapi-gen=false +type 
taskDeprecation struct { + // DeprecatedSteps contains Steps of a Task that with deprecated fields defined. + // +listType=atomic + DeprecatedSteps []Step `json:"deprecatedSteps,omitempty"` + // DeprecatedStepTemplate contains stepTemplate of a Task that with deprecated fields defined. + DeprecatedStepTemplate *StepTemplate `json:"deprecatedStepTemplate,omitempty"` +} + +// taskDeprecations contains deprecated fields of Tasks that belong to the same Pipeline or PipelineRun +// the key is Task name +// +k8s:openapi-gen=false +type taskDeprecations map[string]taskDeprecation + +// serializeTaskDeprecations appends the current Task's deprecation info to annotation of the object. +// The object could be Task, TaskRun, Pipeline or PipelineRun +func serializeTaskDeprecations(meta *metav1.ObjectMeta, spec *TaskSpec, taskName string) error { + var taskDeprecation *taskDeprecation + if spec.HasDeprecatedFields() { + taskDeprecation = retrieveTaskDeprecation(spec) + } + var existingDeprecations = taskDeprecations{} + if str, ok := meta.Annotations[TaskDeprecationsAnnotationKey]; ok { + if err := json.Unmarshal([]byte(str), &existingDeprecations); err != nil { + return fmt.Errorf("error deserializing key %s from metadata: %w", TaskDeprecationsAnnotationKey, err) + } + } + if taskDeprecation != nil { + existingDeprecations[taskName] = *taskDeprecation + return version.SerializeToMetadata(meta, existingDeprecations, TaskDeprecationsAnnotationKey) + } + return nil +} + +// deserializeTaskDeprecations retrieves deprecation info of the Task from object annotation. +// The object could be Task, TaskRun, Pipeline or PipelineRun. +func deserializeTaskDeprecations(meta *metav1.ObjectMeta, spec *TaskSpec, taskName string) error { + var existingDeprecations = taskDeprecations{} + if meta == nil || meta.Annotations == nil { + return nil + } + if str, ok := meta.Annotations[TaskDeprecationsAnnotationKey]; ok { + if err := json.Unmarshal([]byte(str), &existingDeprecations); err != nil { + return fmt.Errorf("error deserializing key %s from metadata: %w", TaskDeprecationsAnnotationKey, err) + } + } + if td, ok := existingDeprecations[taskName]; ok { + if len(spec.Steps) != len(td.DeprecatedSteps) { + return fmt.Errorf("length of deserialized steps mismatch the length of target steps") + } + for i := 0; i < len(spec.Steps); i++ { + spec.Steps[i].DeprecatedPorts = td.DeprecatedSteps[i].DeprecatedPorts + spec.Steps[i].DeprecatedLivenessProbe = td.DeprecatedSteps[i].DeprecatedLivenessProbe + spec.Steps[i].DeprecatedReadinessProbe = td.DeprecatedSteps[i].DeprecatedReadinessProbe + spec.Steps[i].DeprecatedStartupProbe = td.DeprecatedSteps[i].DeprecatedStartupProbe + spec.Steps[i].DeprecatedLifecycle = td.DeprecatedSteps[i].DeprecatedLifecycle + spec.Steps[i].DeprecatedTerminationMessagePath = td.DeprecatedSteps[i].DeprecatedTerminationMessagePath + spec.Steps[i].DeprecatedTerminationMessagePolicy = td.DeprecatedSteps[i].DeprecatedTerminationMessagePolicy + spec.Steps[i].DeprecatedStdin = td.DeprecatedSteps[i].DeprecatedStdin + spec.Steps[i].DeprecatedStdinOnce = td.DeprecatedSteps[i].DeprecatedStdinOnce + spec.Steps[i].DeprecatedTTY = td.DeprecatedSteps[i].DeprecatedTTY + } + if td.DeprecatedStepTemplate != nil { + if spec.StepTemplate == nil { + spec.StepTemplate = &StepTemplate{} + } + spec.StepTemplate.DeprecatedName = td.DeprecatedStepTemplate.DeprecatedName + spec.StepTemplate.DeprecatedPorts = td.DeprecatedStepTemplate.DeprecatedPorts + spec.StepTemplate.DeprecatedLivenessProbe = 
td.DeprecatedStepTemplate.DeprecatedLivenessProbe + spec.StepTemplate.DeprecatedReadinessProbe = td.DeprecatedStepTemplate.DeprecatedReadinessProbe + spec.StepTemplate.DeprecatedStartupProbe = td.DeprecatedStepTemplate.DeprecatedStartupProbe + spec.StepTemplate.DeprecatedLifecycle = td.DeprecatedStepTemplate.DeprecatedLifecycle + spec.StepTemplate.DeprecatedTerminationMessagePath = td.DeprecatedStepTemplate.DeprecatedTerminationMessagePath + spec.StepTemplate.DeprecatedTerminationMessagePolicy = td.DeprecatedStepTemplate.DeprecatedTerminationMessagePolicy + spec.StepTemplate.DeprecatedStdin = td.DeprecatedStepTemplate.DeprecatedStdin + spec.StepTemplate.DeprecatedStdinOnce = td.DeprecatedStepTemplate.DeprecatedStdinOnce + spec.StepTemplate.DeprecatedTTY = td.DeprecatedStepTemplate.DeprecatedTTY + } + delete(existingDeprecations, taskName) + if len(existingDeprecations) == 0 { + delete(meta.Annotations, TaskDeprecationsAnnotationKey) + } else { + updatedDeprecations, err := json.Marshal(existingDeprecations) + if err != nil { + return err + } + meta.Annotations[TaskDeprecationsAnnotationKey] = string(updatedDeprecations) + } + if len(meta.Annotations) == 0 { + meta.Annotations = nil + } + } + return nil +} + +func retrieveTaskDeprecation(spec *TaskSpec) *taskDeprecation { + if !spec.HasDeprecatedFields() { + return nil + } + ds := []Step{} + for _, s := range spec.Steps { + ds = append(ds, Step{ + DeprecatedPorts: s.DeprecatedPorts, + DeprecatedLivenessProbe: s.DeprecatedLivenessProbe, + DeprecatedReadinessProbe: s.DeprecatedReadinessProbe, + DeprecatedStartupProbe: s.DeprecatedStartupProbe, + DeprecatedLifecycle: s.DeprecatedLifecycle, + DeprecatedTerminationMessagePath: s.DeprecatedTerminationMessagePath, + DeprecatedTerminationMessagePolicy: s.DeprecatedTerminationMessagePolicy, + DeprecatedStdin: s.DeprecatedStdin, + DeprecatedStdinOnce: s.DeprecatedStdinOnce, + DeprecatedTTY: s.DeprecatedTTY, + }) + } + var dst *StepTemplate + if spec.StepTemplate != nil { + dst = &StepTemplate{ + DeprecatedName: spec.StepTemplate.DeprecatedName, + DeprecatedPorts: spec.StepTemplate.DeprecatedPorts, + DeprecatedLivenessProbe: spec.StepTemplate.DeprecatedLivenessProbe, + DeprecatedReadinessProbe: spec.StepTemplate.DeprecatedReadinessProbe, + DeprecatedStartupProbe: spec.StepTemplate.DeprecatedStartupProbe, + DeprecatedLifecycle: spec.StepTemplate.DeprecatedLifecycle, + DeprecatedTerminationMessagePath: spec.StepTemplate.DeprecatedTerminationMessagePath, + DeprecatedTerminationMessagePolicy: spec.StepTemplate.DeprecatedTerminationMessagePolicy, + DeprecatedStdin: spec.StepTemplate.DeprecatedStdin, + DeprecatedStdinOnce: spec.StepTemplate.DeprecatedStdinOnce, + DeprecatedTTY: spec.StepTemplate.DeprecatedTTY, + } + } + return &taskDeprecation{ + DeprecatedSteps: ds, + DeprecatedStepTemplate: dst, + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go index 850929d015..8553092566 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go @@ -28,13 +28,14 @@ import ( // +genclient:noStatus // +genreconciler:krshapedlogic=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true // Task represents a collection of sequential steps that are run as part of a // Pipeline using a set of inputs and producing a set of outputs. 
Tasks execute // when TaskRuns are created that provide the input parameters and resources and // output resources the Task requires. // -// +k8s:openapi-gen=true +// Deprecated: Please use v1.Task instead. type Task struct { metav1.TypeMeta `json:",inline"` // +optional @@ -130,3 +131,41 @@ type TaskList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []Task `json:"items"` } + +// HasDeprecatedFields returns true if the TaskSpec has deprecated field specified. +func (ts *TaskSpec) HasDeprecatedFields() bool { + if ts == nil { + return false + } + if len(ts.Steps) > 0 { + for _, s := range ts.Steps { + if len(s.DeprecatedPorts) > 0 || + s.DeprecatedLivenessProbe != nil || + s.DeprecatedReadinessProbe != nil || + s.DeprecatedStartupProbe != nil || + s.DeprecatedLifecycle != nil || + s.DeprecatedTerminationMessagePath != "" || + s.DeprecatedTerminationMessagePolicy != "" || + s.DeprecatedStdin || + s.DeprecatedStdinOnce || + s.DeprecatedTTY { + return true + } + } + } + if ts.StepTemplate != nil { + if len(ts.StepTemplate.DeprecatedPorts) > 0 || + ts.StepTemplate.DeprecatedName != "" || + ts.StepTemplate.DeprecatedReadinessProbe != nil || + ts.StepTemplate.DeprecatedStartupProbe != nil || + ts.StepTemplate.DeprecatedLifecycle != nil || + ts.StepTemplate.DeprecatedTerminationMessagePath != "" || + ts.StepTemplate.DeprecatedTerminationMessagePolicy != "" || + ts.StepTemplate.DeprecatedStdin || + ts.StepTemplate.DeprecatedStdinOnce || + ts.StepTemplate.DeprecatedTTY { + return true + } + } + return false +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go index e76f342121..aec142cf04 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go @@ -62,8 +62,10 @@ var objectVariableNameFormatRegex = regexp.MustCompile(objectVariableNameFormat) // Validate implements apis.Validatable func (t *Task) Validate(ctx context.Context) *apis.FieldError { errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false) - return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + errs = errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) + // When a Task is created directly, instead of declared inline in a TaskRun or PipelineRun, + // we do not support propagated parameters. Validate that all params it uses are declared. + return errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.Spec.Steps, t.Spec.Params).ViaField("spec")) } // Validate implements apis.Validatable @@ -72,14 +74,6 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(apis.ErrMissingField("steps")) } - if config.IsSubstituted(ctx) { - // Validate the task's workspaces only. 
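HasDeprecatedFields, serializeTaskDeprecations, and deserializeTaskDeprecations together let a v1beta1 Task keep container-level fields that no longer exist in v1 by parking them in the tekton.dev/v1beta1.task-deprecations annotation during conversion. A minimal sketch of that round trip, using the deprecated tty step field listed above; error handling is trimmed and the task name is illustrative.

```go
// Illustrative only: a deprecated step field survives a v1beta1 -> v1 -> v1beta1 round trip
// via the task-deprecations annotation.
package main

import (
	"context"
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ctx := context.Background()
	src := &v1beta1.Task{
		ObjectMeta: metav1.ObjectMeta{Name: "legacy-task"}, // illustrative name
		Spec: v1beta1.TaskSpec{
			Steps: []v1beta1.Step{{Name: "run", Image: "alpine", Script: "echo hi", DeprecatedTTY: true}},
		},
	}
	fmt.Println(src.Spec.HasDeprecatedFields()) // true

	// v1beta1 -> v1: the deprecated field has nowhere to live in v1.TaskSpec,
	// so it is serialized onto the converted object's annotations.
	converted := &v1.Task{}
	if err := src.ConvertTo(ctx, converted); err != nil {
		fmt.Println("convert to v1:", err)
		return
	}
	fmt.Println(converted.Annotations[v1beta1.TaskDeprecationsAnnotationKey])

	// v1 -> v1beta1: the annotation is consumed and removed, restoring the field.
	restored := &v1beta1.Task{}
	if err := restored.ConvertFrom(ctx, converted); err != nil {
		fmt.Println("convert from v1:", err)
		return
	}
	fmt.Println(restored.Spec.Steps[0].DeprecatedTTY) // true
}
```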
- errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) - errs = errs.Also(validateWorkspaceUsages(ctx, ts)) - - return errs - } - errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes")) errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) errs = errs.Also(validateWorkspaceUsages(ctx, ts)) @@ -105,6 +99,28 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { return errs } +// ValidateUsageOfDeclaredParameters validates that all parameters referenced in the Task are declared by the Task. +func ValidateUsageOfDeclaredParameters(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError { + var errs *apis.FieldError + _, _, objectParams := params.sortByType() + allParameterNames := sets.NewString(params.getNames()...) + errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateObjectUsage(ctx, steps, objectParams)) + errs = errs.Also(validateObjectParamsHaveProperties(ctx, params)) + return errs +} + +// validateObjectParamsHaveProperties returns an error if any declared object params are missing properties +func validateObjectParamsHaveProperties(ctx context.Context, params ParamSpecs) *apis.FieldError { + var errs *apis.FieldError + for _, p := range params { + if p.Type == ParamTypeObject && p.Properties == nil { + errs = errs.Also(apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name))) + } + } + return errs +} + func validateSidecarNames(sidecars []Sidecar) (errs *apis.FieldError) { for _, sc := range sidecars { if sc.Name == pipeline.ReservedResultsSidecarName { @@ -160,8 +176,12 @@ func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, // validateWorkspaceUsages checks that all WorkspaceUsage objects in Steps // refer to workspaces that are defined in the Task. // -// This is an alpha feature and will fail validation if it's used by a step -// or sidecar when the enable-api-fields feature gate is anything but "alpha". +// This is a beta feature and will fail validation if it's used by a step +// or sidecar when the enable-api-fields feature gate is anything but "beta". 
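ValidateUsageOfDeclaredParameters is the exported piece shared by Task.Validate, the inline-parameter checks on PipelineRun specs, and the per-pipeline-task validation added earlier in this diff. A short sketch of calling it directly; the step content and parameter names are made up.

```go
// Illustrative only: a step that references an undeclared parameter is rejected.
package main

import (
	"context"
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)

func main() {
	steps := []v1beta1.Step{{
		Name:   "build",
		Image:  "golang:1.21",
		Script: "go build $(params.flags) ./...",
	}}
	declared := v1beta1.ParamSpecs{{Name: "package", Type: v1beta1.ParamTypeString}}

	// "flags" is referenced but never declared, so a FieldError rooted at
	// steps[0].script is returned; declared object params without properties
	// are caught here too, via validateObjectParamsHaveProperties.
	if err := v1beta1.ValidateUsageOfDeclaredParameters(context.Background(), steps, declared); err != nil {
		fmt.Println(err.Error())
	}
}
```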
+// +// Note that this feature reached beta after the v1 API version has been released and +// consequently it is *not* implicitly enabled on the v1beta1 API to avoid suffering +// from the issues described in TEP-0138 https://github.com/tektoncd/community/pull/1034 func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.FieldError) { workspaces := ts.Workspaces steps := ts.Steps @@ -174,7 +194,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for stepIdx, step := range steps { if len(step.Workspaces) != 0 { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.BetaAPIFields).ViaIndex(stepIdx).ViaField("steps")) } for workspaceIdx, w := range step.Workspaces { if !wsNames.Has(w.Name) { @@ -185,7 +205,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for sidecarIdx, sidecar := range sidecars { if len(sidecar.Workspaces) != 0 { - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.BetaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) } for workspaceIdx, w := range sidecar.Workspaces { if !wsNames.Has(w.Name) { @@ -297,11 +317,6 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi // ValidateParameterTypes validates all the types within a slice of ParamSpecs func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { for _, p := range params { - if p.Type == ParamTypeObject { - // Object type parameter is a beta feature and will fail validation if it's used in a task spec - // when the enable-api-fields feature gate is not "alpha" or "beta". - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) - } errs = errs.Also(p.ValidateType(ctx)) } return errs @@ -340,14 +355,6 @@ func (p ParamSpec) ValidateType(ctx context.Context) *apis.FieldError { // definition of `properties` section and the type of a PropertySpec is allowed. // (Currently, only string is allowed) func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { - if p.Type == ParamTypeObject && p.Properties == nil { - // If this we are not skipping validation checks due to propagated params - // then properties field is required. 
- if config.ValidateParameterVariablesAndWorkspaces(ctx) { - return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) - } - } - invalidKeys := []string{} for key, propertySpec := range p.Properties { if propertySpec.Type != ParamTypeString { @@ -366,38 +373,17 @@ func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { } // ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps -func ValidateParameterVariables(ctx context.Context, steps []Step, params []ParamSpec) *apis.FieldError { - allParameterNames := sets.NewString() - stringParameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - objectParamSpecs := []ParamSpec{} +func ValidateParameterVariables(ctx context.Context, steps []Step, params ParamSpecs) *apis.FieldError { var errs *apis.FieldError - for _, p := range params { - // validate no duplicate names - if allParameterNames.Has(p.Name) { - errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) - } - allParameterNames.Insert(p.Name) - - switch p.Type { - case ParamTypeArray: - arrayParameterNames.Insert(p.Name) - case ParamTypeObject: - objectParamSpecs = append(objectParamSpecs, p) - case ParamTypeString: - fallthrough - default: - stringParameterNames.Insert(p.Name) - } - } - errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) - if config.ValidateParameterVariablesAndWorkspaces(ctx) { - errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) - errs = errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) - } + errs = errs.Also(params.validateNoDuplicateNames()) + stringParams, arrayParams, objectParams := params.sortByType() + stringParameterNames := sets.NewString(stringParams.getNames()...) + arrayParameterNames := sets.NewString(arrayParams.getNames()...) + errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParams)) return errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) } +// validateTaskContextVariables returns an error if any Steps reference context variables that don't exist. func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { taskRunContextNames := sets.NewString().Insert( "name", @@ -419,7 +405,7 @@ func validateTaskResultsVariables(ctx context.Context, steps []Step, results []T resultsNames.Insert(r.Name) } for idx, step := range steps { - errs = errs.Also(validateTaskVariable(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) } return errs } @@ -444,8 +430,7 @@ func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) } -// validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings -// i.e. 
param.objectParam, param.objectParam[*] +// validateObjectUsageAsWhole returns an error if the Steps contain references to the entire input object params in fields where these references are prohibited func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { for idx, step := range steps { errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars)).ViaFieldIndex("steps", idx) @@ -453,57 +438,61 @@ func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) ( return errs } +// validateStepObjectUsageAsWhole returns an error if the Step contains references to the entire input object params in fields where these references are prohibited func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskNoObjectReferenced(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskNoObjectReferenced(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskNoObjectReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskNoObjectReferenced(step.Script, prefix, vars).ViaField("script")) + errs := substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(step.Script, prefix, vars).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskNoObjectReferenced(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(cmd, prefix, vars).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskNoObjectReferenced(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(arg, prefix, vars).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskNoObjectReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskNoObjectReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoObjectReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoObjectReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) } return errs } -func validateArrayUsage(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { +// validateArrayUsage returns an error if the Steps contain 
references to the input array params in fields where these references are prohibited +func validateArrayUsage(steps []Step, prefix string, arrayParamNames sets.String) (errs *apis.FieldError) { for idx, step := range steps { - errs = errs.Also(validateStepArrayUsage(step, prefix, vars)).ViaFieldIndex("steps", idx) + errs = errs.Also(validateStepArrayUsage(step, prefix, arrayParamNames)).ViaFieldIndex("steps", idx) } return errs } -func validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskNoArrayReferenced(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskNoArrayReferenced(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskNoArrayReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskNoArrayReferenced(step.Script, prefix, vars).ViaField("script")) +// validateStepArrayUsage returns an error if the Step contains references to the input array params in fields where these references are prohibited +func validateStepArrayUsage(step Step, prefix string, arrayParamNames sets.String) *apis.FieldError { + errs := substitution.ValidateNoReferencesToProhibitedVariables(step.Name, prefix, arrayParamNames).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Image, prefix, arrayParamNames).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.WorkingDir, prefix, arrayParamNames).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(step.Script, prefix, arrayParamNames).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskArraysIsolated(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(cmd, prefix, arrayParamNames).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskArraysIsolated(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(arg, prefix, arrayParamNames).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskNoArrayReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(env.Value, prefix, arrayParamNames).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskNoArrayReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoArrayReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskNoArrayReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.Name, prefix, arrayParamNames).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.MountPath, prefix, arrayParamNames).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(v.SubPath, prefix, arrayParamNames).ViaField("subPath").ViaFieldIndex("volumeMount", i)) } return errs } +// validateVariables returns an error if the Steps contain references to any unknown variables func validateVariables(ctx context.Context, steps []Step, prefix string, vars 
sets.String) (errs *apis.FieldError) { // We've checked param name format. Now, we want to check if param names are referenced correctly in each step for idx, step := range steps { @@ -561,68 +550,41 @@ func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSp return errs } +// validateStepVariables returns an error if the Step contains references to any unknown variables func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError { - errs := validateTaskVariable(step.Name, prefix, vars).ViaField("name") - errs = errs.Also(validateTaskVariable(step.Image, prefix, vars).ViaField("image")) - errs = errs.Also(validateTaskVariable(step.WorkingDir, prefix, vars).ViaField("workingDir")) - errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + errs := substitution.ValidateNoReferencesToUnknownVariables(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, prefix, vars).ViaField("script")) for i, cmd := range step.Command { - errs = errs.Also(validateTaskVariable(cmd, prefix, vars).ViaFieldIndex("command", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(cmd, prefix, vars).ViaFieldIndex("command", i)) } for i, arg := range step.Args { - errs = errs.Also(validateTaskVariable(arg, prefix, vars).ViaFieldIndex("args", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(arg, prefix, vars).ViaFieldIndex("args", i)) } for _, env := range step.Env { - errs = errs.Also(validateTaskVariable(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) } for i, v := range step.VolumeMounts { - errs = errs.Also(validateTaskVariable(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskVariable(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) - errs = errs.Also(validateTaskVariable(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) } - errs = errs.Also(validateTaskVariable(string(step.OnError), prefix, vars).ViaField("onError")) + errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(string(step.OnError), prefix, vars).ViaField("onError")) return errs } -func validateTaskVariable(value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariableP(value, prefix, vars) -} - -func validateTaskNoObjectReferenced(value, prefix string, objectNames sets.String) *apis.FieldError { - return substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames) -} - -func validateTaskNoArrayReferenced(value, prefix string, arrayNames sets.String) 
*apis.FieldError { - return substitution.ValidateVariableProhibitedP(value, prefix, arrayNames) -} - -func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableIsolatedP(value, prefix, arrayNames) -} - // isParamRefs attempts to check if a specified string looks like it contains any parameter reference // This is useful to make sure the specified value looks like a Parameter Reference before performing any strict validation func isParamRefs(s string) bool { return strings.HasPrefix(s, "$("+ParamsPrefix) } -// ValidateParamArrayIndex validates if the param reference to an array param is out of bound. -// error is returned when the array indexing reference is out of bound of the array param -// e.g. if a param reference of $(params.array-param[2]) and the array param is of length 2. -// - `trParams` are params from taskrun. -// - `taskSpec` contains params declarations. -func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) error { - cfg := config.FromContextOrDefaults(ctx) - if cfg.FeatureFlags.EnableAPIFields != config.AlphaAPIFields { - return nil - } - - // Collect all array params lengths - arrayParamsLengths := ts.Params.extractParamArrayLengths() - for k, v := range params.extractParamArrayLengths() { - arrayParamsLengths[k] = v - } - +// GetIndexingReferencesToArrayParams returns all strings referencing indices of TaskRun array parameters +// from parameters, workspaces, and when expressions defined in the Task. +// For example, if a Task has a parameter with a value "$(params.array-param-name[1])", +// this would be one of the strings returned. +func (ts *TaskSpec) GetIndexingReferencesToArrayParams() sets.String { // collect all the possible places to use param references paramsRefs := []string{} paramsRefs = append(paramsRefs, extractParamRefsFromSteps(ts.Steps)...) @@ -632,12 +594,10 @@ func (ts *TaskSpec) ValidateParamArrayIndex(ctx context.Context, params Params) paramsRefs = append(paramsRefs, v.MountPath) } paramsRefs = append(paramsRefs, extractParamRefsFromSidecars(ts.Sidecars)...) - // extract all array indexing references, for example []{"$(params.array-params[1])"} arrayIndexParamRefs := []string{} for _, p := range paramsRefs { arrayIndexParamRefs = append(arrayIndexParamRefs, extractArrayIndexingParamRefs(p)...) } - - return validateOutofBoundArrayParams(arrayIndexParamRefs, arrayParamsLengths) + return sets.NewString(arrayIndexParamRefs...) 
} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go index e8e695194b..ab5f2a5614 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go @@ -34,7 +34,8 @@ func (tr TaskRef) convertTo(ctx context.Context, sink *v1.TaskRef) { tr.convertBundleToResolver(sink) } -func (tr *TaskRef) convertFrom(ctx context.Context, source v1.TaskRef) { +// ConvertFrom converts v1beta1 TaskRef from v1 TaskRef +func (tr *TaskRef) ConvertFrom(ctx context.Context, source v1.TaskRef) { tr.Name = source.Name tr.Kind = TaskKind(source.Kind) tr.APIVersion = source.APIVersion diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go index 0297139922..4e42b7aa2e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go @@ -18,8 +18,10 @@ package v1beta1 import ( "context" + "strings" "github.com/google/go-containerregistry/pkg/name" + "k8s.io/apimachinery/pkg/util/validation" "knative.dev/pkg/apis" ) @@ -30,7 +32,8 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { return } - if ref.Resolver != "" || ref.Params != nil { + switch { + case ref.Resolver != "" || ref.Params != nil: if ref.Resolver != "" { if ref.Name != "" { errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) @@ -51,16 +54,21 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { } errs = errs.Also(ValidateParameters(ctx, ref.Params)) } - } else { + case ref.Bundle != "": if ref.Name == "" { errs = errs.Also(apis.ErrMissingField("name")) } - if ref.Bundle != "" { - errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) - if _, err := name.ParseReference(ref.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) - } + errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) + if _, err := name.ParseReference(ref.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) + } + default: + if ref.Name == "" { + errs = errs.Also(apis.ErrMissingField("name")) + } else if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 { + // TaskRef name must be a valid k8s name + errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name")) } } - return //nolint:nakedret + return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go index b2745ec5c7..d97da847a3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go @@ -44,17 +44,17 @@ func (tr *TaskRun) ConvertTo(ctx context.Context, to apis.Convertible) error { if err := serializeTaskRunCloudEvents(&sink.ObjectMeta, &tr.Status); err != nil { return err } - if err := tr.Status.ConvertTo(ctx, &sink.Status); err != nil { + if err := tr.Status.ConvertTo(ctx, &sink.Status, &sink.ObjectMeta); err != nil 
{ return err } - return tr.Spec.ConvertTo(ctx, &sink.Spec) + return tr.Spec.ConvertTo(ctx, &sink.Spec, &sink.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", sink) } } // ConvertTo implements apis.Convertible -func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1.TaskRunSpec) error { +func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1.TaskRunSpec, meta *metav1.ObjectMeta) error { if trs.Debug != nil { sink.Debug = &v1.TaskRunDebug{} trs.Debug.convertTo(ctx, sink.Debug) @@ -72,7 +72,7 @@ func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1.TaskRunSpec) err } if trs.TaskSpec != nil { sink.TaskSpec = &v1.TaskSpec{} - err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec) + err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec, meta, meta.Name) if err != nil { return err } @@ -115,17 +115,17 @@ func (tr *TaskRun) ConvertFrom(ctx context.Context, from apis.Convertible) error if err := deserializeTaskRunCloudEvents(&tr.ObjectMeta, &tr.Status); err != nil { return err } - if err := tr.Status.ConvertFrom(ctx, source.Status); err != nil { + if err := tr.Status.ConvertFrom(ctx, source.Status, &tr.ObjectMeta); err != nil { return err } - return tr.Spec.ConvertFrom(ctx, &source.Spec) + return tr.Spec.ConvertFrom(ctx, &source.Spec, &tr.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", tr) } } // ConvertFrom implements apis.Convertible -func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec) error { +func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec, meta *metav1.ObjectMeta) error { if source.Debug != nil { newDebug := TaskRunDebug{} newDebug.convertFrom(ctx, *source.Debug) @@ -134,18 +134,18 @@ func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec) trs.Params = nil for _, p := range source.Params { new := Param{} - new.convertFrom(ctx, p) + new.ConvertFrom(ctx, p) trs.Params = append(trs.Params, new) } trs.ServiceAccountName = source.ServiceAccountName if source.TaskRef != nil { newTaskRef := TaskRef{} - newTaskRef.convertFrom(ctx, *source.TaskRef) + newTaskRef.ConvertFrom(ctx, *source.TaskRef) trs.TaskRef = &newTaskRef } if source.TaskSpec != nil { newTaskSpec := TaskSpec{} - err := newTaskSpec.ConvertFrom(ctx, source.TaskSpec) + err := newTaskSpec.ConvertFrom(ctx, source.TaskSpec, meta, meta.Name) if err != nil { return err } @@ -159,7 +159,7 @@ func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec) trs.Workspaces = nil for _, w := range source.Workspaces { new := WorkspaceBinding{} - new.convertFrom(ctx, w) + new.ConvertFrom(ctx, w) trs.Workspaces = append(trs.Workspaces, new) } trs.StepOverrides = nil @@ -207,7 +207,7 @@ func (trso *TaskRunSidecarOverride) convertFrom(ctx context.Context, source v1.T } // ConvertTo implements apis.Convertible -func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus) error { +func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus, meta *metav1.ObjectMeta) error { sink.Status = trs.Status sink.PodName = trs.PodName sink.StartTime = trs.StartTime @@ -221,7 +221,7 @@ func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus) sink.RetriesStatus = nil for _, rr := range trs.RetriesStatus { new := v1.TaskRunStatus{} - err := rr.ConvertTo(ctx, &new) + err := rr.ConvertTo(ctx, &new, meta) if err != nil { return err } @@ -242,7 +242,7 @@ func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus) if trs.TaskSpec != nil { 
sink.TaskSpec = &v1.TaskSpec{} - err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec) + err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec, meta, meta.Name) if err != nil { return err } @@ -256,7 +256,7 @@ func (trs *TaskRunStatus) ConvertTo(ctx context.Context, sink *v1.TaskRunStatus) } // ConvertFrom implements apis.Convertible -func (trs *TaskRunStatus) ConvertFrom(ctx context.Context, source v1.TaskRunStatus) error { +func (trs *TaskRunStatus) ConvertFrom(ctx context.Context, source v1.TaskRunStatus, meta *metav1.ObjectMeta) error { trs.Status = source.Status trs.PodName = source.PodName trs.StartTime = source.StartTime @@ -270,7 +270,7 @@ func (trs *TaskRunStatus) ConvertFrom(ctx context.Context, source v1.TaskRunStat trs.RetriesStatus = nil for _, rr := range source.RetriesStatus { new := TaskRunStatus{} - err := new.ConvertFrom(ctx, rr) + err := new.ConvertFrom(ctx, rr, meta) if err != nil { return err } @@ -291,7 +291,7 @@ func (trs *TaskRunStatus) ConvertFrom(ctx context.Context, source v1.TaskRunStat if source.TaskSpec != nil { trs.TaskSpec = &TaskSpec{} - err := trs.TaskSpec.ConvertFrom(ctx, source.TaskSpec) + err := trs.TaskSpec.ConvertFrom(ctx, source.TaskSpec, meta, meta.Name) if err != nil { return err } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go index 61f3285dec..2e4896d47b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go @@ -24,6 +24,7 @@ import ( pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" + "knative.dev/pkg/kmap" ) var _ apis.Defaultable = (*TaskRun)(nil) @@ -36,6 +37,13 @@ func (tr *TaskRun) SetDefaults(ctx context.Context) { ctx = apis.WithinParent(ctx, tr.ObjectMeta) tr.Spec.SetDefaults(ctx) + // Silently filtering out Tekton Reserved annotations at creation + if apis.IsInCreate(ctx) { + tr.ObjectMeta.Annotations = kmap.Filter(tr.ObjectMeta.Annotations, func(s string) bool { + return filterReservedAnnotationRegexp.MatchString(s) + }) + } + // If the TaskRun doesn't have a managed-by label, apply the default // specified in the config. cfg := config.FromContextOrDefaults(ctx) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go index 27bb514322..134e1c8137 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -61,7 +61,6 @@ type TaskRunSpec struct { // +optional Retries int `json:"retries,omitempty"` // Time after which one retry attempt times out. Defaults to 1 hour. - // Specified build timeout should be less than 24h. // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration // +optional Timeout *metav1.Duration `json:"timeout,omitempty"` @@ -389,12 +388,13 @@ type CloudEventDeliveryState struct { // +genclient // +genreconciler:krshapedlogic=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true // TaskRun represents a single execution of a Task. TaskRuns are how the steps // specified in a Task are executed; they specify the parameters and resources // used to run the steps in a Task. 
// -// +k8s:openapi-gen=true +// Deprecated: Please use v1.TaskRun instead. type TaskRun struct { metav1.TypeMeta `json:",inline"` // +optional @@ -450,11 +450,16 @@ func (tr *TaskRun) HasStarted() bool { return tr.Status.StartTime != nil && !tr.Status.StartTime.IsZero() } -// IsSuccessful returns true if the TaskRun's status indicates that it is done. +// IsSuccessful returns true if the TaskRun's status indicates that it has succeeded. func (tr *TaskRun) IsSuccessful() bool { return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } +// IsFailure returns true if the TaskRun's status indicates that it has failed. +func (tr *TaskRun) IsFailure() bool { + return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsFalse() +} + // IsCancelled returns true if the TaskRun's spec status is set to Cancelled state func (tr *TaskRun) IsCancelled() bool { return tr.Spec.Status == TaskRunSpecStatusCancelled diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go index ef414612b4..300fd4aa93 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go @@ -62,8 +62,6 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { } // Validate TaskSpec if it's present. if ts.TaskSpec != nil { - // skip validation of parameter and workspaces variables since we validate them via taskrunspec below. - ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) errs = errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskSpec")) } @@ -143,7 +141,8 @@ func (ts *TaskRunSpec) validateInlineParameters(ctx context.Context) (errs *apis } if ts.TaskSpec != nil && ts.TaskSpec.Steps != nil { errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) - errs = errs.Also(ValidateParameterVariables(config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), ts.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateParameterVariables(ctx, ts.TaskSpec.Steps, paramSpec)) + errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, ts.TaskSpec.Steps, paramSpec)) } return errs } @@ -243,11 +242,6 @@ func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs func ValidateParameters(ctx context.Context, params Params) (errs *apis.FieldError) { var names []string for _, p := range params { - if p.Value.Type == ParamTypeObject { - // Object type parameter is a beta feature and will fail validation if it's used in a taskrun spec - // when the enable-api-fields feature gate is not "alpha" or "beta". 
- errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) - } names = append(names, p.Name) } return errs.Also(validateNoDuplicateNames(names, false)) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go index ac53da805d..e98eff147c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go @@ -102,9 +102,9 @@ func (wes WhenExpressions) AllowsExecution() bool { return true } -// ReplaceWhenExpressionsVariables interpolates variables, such as Parameters and Results, in +// ReplaceVariables interpolates variables, such as Parameters and Results, in // the Input and Values. -func (wes WhenExpressions) ReplaceWhenExpressionsVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions { +func (wes WhenExpressions) ReplaceVariables(replacements map[string]string, arrayReplacements map[string][]string) WhenExpressions { replaced := wes for i := range wes { replaced[i] = wes[i].applyReplacements(replacements, arrayReplacements) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go index f7daa5cfb4..e1f33a7fbb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go @@ -84,7 +84,8 @@ func (w WorkspaceBinding) convertTo(ctx context.Context, sink *v1.WorkspaceBindi sink.CSI = w.CSI } -func (w *WorkspaceBinding) convertFrom(ctx context.Context, source v1.WorkspaceBinding) { +// ConvertFrom converts v1beta1 Param from v1 Param +func (w *WorkspaceBinding) ConvertFrom(ctx context.Context, source v1.WorkspaceBinding) { w.Name = source.Name w.SubPath = source.SubPath w.VolumeClaimTemplate = source.VolumeClaimTemplate diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go index 1d509ceadd..35a691cb77 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/conversion.go @@ -41,7 +41,7 @@ func SerializeToMetadata(meta *metav1.ObjectMeta, field interface{}, key string) // deserializes it into "to", and removes the key from the metadata's annotations. // Returns nil if the key is not present in the annotations. 
func DeserializeFromMetadata(meta *metav1.ObjectMeta, to interface{}, key string) error { - if meta.Annotations == nil { + if meta == nil || meta.Annotations == nil { return nil } if str, ok := meta.Annotations[key]; ok { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go index cb93bc8e7f..69b2c0d827 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go @@ -48,24 +48,12 @@ var paramIndexingRegex = regexp.MustCompile(paramIndexing) // intIndexRegex will match all `[int]` for param expression var intIndexRegex = regexp.MustCompile(intIndex) -// ValidateVariable makes sure all variables in the provided string are known -func ValidateVariable(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := ExtractVariablesFromString(value, prefix); present { - for _, v := range vs { - v = strings.TrimSuffix(v, "[*]") - if !vars.Has(v) { - return &apis.FieldError{ - Message: fmt.Sprintf("non-existent variable in %q for %s %s", value, locationName, name), - Paths: []string{path + "." + name}, - } - } - } - } - return nil -} - -// ValidateVariableP makes sure all variables for a parameter in the provided string are known -func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError { +// ValidateNoReferencesToUnknownVariables returns an error if the input string contains references to unknown variables +// Inputs: +// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)" +// - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline" +// - vars: names of known variables +func ValidateNoReferencesToUnknownVariables(value, prefix string, vars sets.String) *apis.FieldError { if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ @@ -87,24 +75,14 @@ func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError return nil } -// ValidateVariableProhibited verifies that variables matching the relevant string expressions do not reference any of the names present in vars. -func ValidateVariableProhibited(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := ExtractVariablesFromString(value, prefix); present { - for _, v := range vs { - v = strings.TrimSuffix(v, "[*]") - if vars.Has(v) { - return &apis.FieldError{ - Message: fmt.Sprintf("variable type invalid in %q for %s %s", value, locationName, name), - Paths: []string{path + "." + name}, - } - } - } - } - return nil -} - -// ValidateVariableProhibitedP verifies that variables for a parameter matching the relevant string expressions do not reference any of the names present in vars. -func ValidateVariableProhibitedP(value, prefix string, vars sets.String) *apis.FieldError { +// ValidateNoReferencesToProhibitedVariables returns an error if the input string contains any references to any variables in vars, +// except for array indexing references. +// +// Inputs: +// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)" +// - prefix: the prefix of the substitutable variable, e.g. 
"params" or "context.pipeline" +// - vars: names of known variables +func ValidateNoReferencesToProhibitedVariables(value, prefix string, vars sets.String) *apis.FieldError { if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ @@ -126,14 +104,20 @@ func ValidateVariableProhibitedP(value, prefix string, vars sets.String) *apis.F return nil } -// ValidateEntireVariableProhibitedP verifies that values of object type are not used as whole. -func ValidateEntireVariableProhibitedP(value, prefix string, vars sets.String) *apis.FieldError { +// ValidateNoReferencesToEntireProhibitedVariables returns an error if the input string contains any whole array/object references +// to any variables in vars. References to array indexes or object keys are permitted. +// +// Inputs: +// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)" +// - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline" +// - vars: names of known variables +func ValidateNoReferencesToEntireProhibitedVariables(value, prefix string, vars sets.String) *apis.FieldError { + paths := []string{""} // Empty path is required to make the `ViaField`, … work vs, err := extractEntireVariablesFromString(value, prefix) if err != nil { return &apis.FieldError{ Message: fmt.Sprintf("extractEntireVariablesFromString failed : %v", err), - // Empty path is required to make the `ViaField`, … work - Paths: []string{""}, + Paths: paths, } } @@ -142,8 +126,7 @@ func ValidateEntireVariableProhibitedP(value, prefix string, vars sets.String) * if vars.Has(v) { return &apis.FieldError{ Message: fmt.Sprintf("variable type invalid in %q", value), - // Empty path is required to make the `ViaField`, … work - Paths: []string{""}, + Paths: paths, } } } @@ -151,43 +134,35 @@ func ValidateEntireVariableProhibitedP(value, prefix string, vars sets.String) * return nil } -// ValidateVariableIsolated verifies that variables matching the relevant string expressions are completely isolated if present. -func ValidateVariableIsolated(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := ExtractVariablesFromString(value, prefix); present { - firstMatch, _ := extractExpressionFromString(value, prefix) - for _, v := range vs { - v = strings.TrimSuffix(v, "[*]") - if vars.Has(v) { - if len(value) != len(firstMatch) { - return &apis.FieldError{ - Message: fmt.Sprintf("variable is not properly isolated in %q for %s %s", value, locationName, name), - Paths: []string{path + "." + name}, - } - } - } - } - } - return nil -} - -// ValidateVariableIsolatedP verifies that variables matching the relevant string expressions are completely isolated if present. -func ValidateVariableIsolatedP(value, prefix string, vars sets.String) *apis.FieldError { +// ValidateVariableReferenceIsIsolated returns an error if the input string contains characters in addition to references to known parameters. +// For example, if "foo" is a known parameter, a value of "foo: $(params.foo)" returns an error, but a value of "$(params.foo)" does not. +// Inputs: +// - value: a string containing a reference to a variable that can be substituted, e.g. "echo $(params.foo)" +// - prefix: the prefix of the substitutable variable, e.g. 
"params" or "context.pipeline" +// - vars: names of known variables +func ValidateVariableReferenceIsIsolated(value, prefix string, vars sets.String) *apis.FieldError { + paths := []string{""} // Empty path is required to make the `ViaField`, … work if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ Message: errString, - Paths: []string{""}, + Paths: paths, + } + } + firstMatch, err := extractExpressionFromString(value, prefix) + if err != nil { + return &apis.FieldError{ + Message: err.Error(), + Paths: paths, } } - firstMatch, _ := extractExpressionFromString(value, prefix) for _, v := range vs { v = strings.TrimSuffix(v, "[*]") if vars.Has(v) { if len(value) != len(firstMatch) { return &apis.FieldError{ Message: fmt.Sprintf("variable is not properly isolated in %q", value), - // Empty path is required to make the `ViaField`, … work - Paths: []string{""}, + Paths: paths, } } } @@ -213,31 +188,37 @@ func ValidateWholeArrayOrObjectRefInStringVariable(name, value, prefix string, v } if isolatedVariableRegex.MatchString(value) { - return true, ValidateVariableP(value, prefix, vars).ViaFieldKey(prefix, name) + return true, ValidateNoReferencesToUnknownVariables(value, prefix, vars).ViaFieldKey(prefix, name) } return false, nil } -// extract a the first full string expressions found (e.g "$(input.params.foo)"). Return -// "" and false if nothing is found. -func extractExpressionFromString(s, prefix string) (string, bool) { +// extract a the first full string expressions found (e.g "$(input.params.foo)"). +// Returns "" if nothing is found. +func extractExpressionFromString(s, prefix string) (string, error) { pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) - re := regexp.MustCompile(pattern) + re, err := regexp.Compile(pattern) + if err != nil { + return "", err + } match := re.FindStringSubmatch(s) if match == nil { - return "", false + return "", nil } - return match[0], true + return match[0], nil } // ExtractVariablesFromString extracts variables from an input string s with the given prefix via regex matching. // It returns a slice of strings which contains the extracted variables, a bool flag to indicate if matches were found // and the error string if the referencing of parameters is invalid. -// If the string does not contain the input prefix then the output will contain an empty slice of strings. +// If the string does not contain the input prefix then the output will contain a slice of strings with length 0. func ExtractVariablesFromString(s, prefix string) ([]string, bool, string) { pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) - re := regexp.MustCompile(pattern) + re, err := regexp.Compile(pattern) + if err != nil { + return nil, false, "" + } matches := re.FindAllStringSubmatch(s, -1) errString := "" // Input string does not contain the prefix and therefore not matches are found. 
@@ -272,11 +253,12 @@ func ExtractVariablesFromString(s, prefix string) ([]string, bool, string) { return vars, true, errString } +// extractEntireVariablesFromString returns any references to entire array or object params in s with the given prefix func extractEntireVariablesFromString(s, prefix string) ([]string, error) { pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution) re, err := regexp.Compile(pattern) if err != nil { - return nil, fmt.Errorf("Fail to parse regex pattern: %w", err) + return nil, fmt.Errorf("failed to parse regex pattern: %w", err) } matches := re.FindAllStringSubmatch(s, -1) @@ -308,7 +290,10 @@ func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { return groups } -// ApplyReplacements applies string replacements +// ApplyReplacements returns a string with references to parameters replaced, +// based on the mapping provided in replacements. +// For example, if the input string is "foo: $(params.foo)", and replacements maps "params.foo" to "bar", +// the output would be "foo: bar". func ApplyReplacements(in string, replacements map[string]string) string { replacementsList := []string{} for k, v := range replacements { diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go index 0ffb315604..a411d542c6 100644 --- a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -1,7 +1,6 @@ package jsonpatch import ( - "bytes" "encoding/json" "fmt" "reflect" @@ -24,21 +23,28 @@ func (j *Operation) Json() string { } func (j *Operation) MarshalJSON() ([]byte, error) { - var b bytes.Buffer - b.WriteString("{") - b.WriteString(fmt.Sprintf(`"op":"%s"`, j.Operation)) - b.WriteString(fmt.Sprintf(`,"path":"%s"`, j.Path)) - // Consider omitting Value for non-nullable operations. - if j.Value != nil || j.Operation == "replace" || j.Operation == "add" { - v, err := json.Marshal(j.Value) - if err != nil { - return nil, err - } - b.WriteString(`,"value":`) - b.Write(v) - } - b.WriteString("}") - return b.Bytes(), nil + // Ensure for add and replace we emit `value: null` + if j.Value == nil && (j.Operation == "replace" || j.Operation == "add") { + return json.Marshal(struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` + }{ + Operation: j.Operation, + Path: j.Path, + }) + } + // otherwise just marshal normally. We cannot literally do json.Marshal(j) as it would be recursively + // calling this function. + return json.Marshal(struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` + }{ + Operation: j.Operation, + Path: j.Path, + Value: j.Value, + }) } type ByPath []Operation @@ -149,9 +155,6 @@ func makePath(path string, newPart interface{}) string { if path == "" { return "/" + key } - if strings.HasSuffix(path, "/") { - return path + key - } return path + "/" + key } @@ -211,22 +214,18 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, } case []interface{}: bt := bv.([]interface{}) - if isSimpleArray(at) && isSimpleArray(bt) { - patch = append(patch, compareEditDistance(at, bt, p)...) 
- } else { - n := min(len(at), len(bt)) - for i := len(at) - 1; i >= n; i-- { - patch = append(patch, NewOperation("remove", makePath(p, i), nil)) - } - for i := n; i < len(bt); i++ { - patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) - } - for i := 0; i < n; i++ { - var err error - patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) - if err != nil { - return nil, err - } + n := min(len(at), len(bt)) + for i := len(at) - 1; i >= n; i-- { + patch = append(patch, NewOperation("remove", makePath(p, i), nil)) + } + for i := n; i < len(bt); i++ { + patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) + } + for i := 0; i < n; i++ { + var err error + patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) + if err != nil { + return nil, err } } default: @@ -235,100 +234,9 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, return patch, nil } -func isBasicType(a interface{}) bool { - switch a.(type) { - case string, float64, bool: - default: - return false - } - return true -} - -func isSimpleArray(a []interface{}) bool { - for i := range a { - switch a[i].(type) { - case string, float64, bool: - default: - val := reflect.ValueOf(a[i]) - if val.Kind() == reflect.Map { - for _, k := range val.MapKeys() { - av := val.MapIndex(k) - if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface { - if av.IsNil() { - continue - } - av = av.Elem() - } - if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool { - return false - } - } - return true - } - return false - } - } - return true -} - -// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm -// Adapted from https://github.com/texttheater/golang-levenshtein -func compareEditDistance(s, t []interface{}, p string) []Operation { - m := len(s) - n := len(t) - - d := make([][]int, m+1) - for i := 0; i <= m; i++ { - d[i] = make([]int, n+1) - d[i][0] = i - } - for j := 0; j <= n; j++ { - d[0][j] = j - } - - for j := 1; j <= n; j++ { - for i := 1; i <= m; i++ { - if reflect.DeepEqual(s[i-1], t[j-1]) { - d[i][j] = d[i-1][j-1] // no op required - } else { - del := d[i-1][j] + 1 - add := d[i][j-1] + 1 - rep := d[i-1][j-1] + 1 - d[i][j] = min(rep, min(add, del)) - } - } - } - - return backtrace(s, t, p, m, n, d) -} - func min(x int, y int) int { if y < x { return y } return x } - -func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Operation { - if i > 0 && matrix[i-1][j]+1 == matrix[i][j] { - op := NewOperation("remove", makePath(p, i-1), nil) - return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...) - } - if j > 0 && matrix[i][j-1]+1 == matrix[i][j] { - op := NewOperation("add", makePath(p, i), t[j-1]) - return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...) - } - if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] { - if isBasicType(s[0]) { - op := NewOperation("replace", makePath(p, i-1), t[j-1]) - return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) - } - - p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{}) - return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) 
- } - if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { - return backtrace(s, t, p, i-1, j-1, matrix) - } - return []Operation{} -} diff --git a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go index f81594c912..1395a7e107 100644 --- a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AdmissionRequest = map[string]string{ diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go index 13067ad80d..82598ed573 100644 --- a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AdmissionRequest = map[string]string{ diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go index 6ac9e80ffc..9a2d0bccdd 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -44,10 +44,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_aaac5994f79683e8, []int{0} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{0} + return fileDescriptor_aaac5994f79683e8, []int{1} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +103,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{1} + return fileDescriptor_aaac5994f79683e8, []int{2} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +131,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{2} + return fileDescriptor_aaac5994f79683e8, []int{3} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +159,7 @@ var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{3} + return fileDescriptor_aaac5994f79683e8, []int{4} } func (m *Rule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +187,7 @@ var xxx_messageInfo_Rule proto.InternalMessageInfo func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } func (*RuleWithOperations) ProtoMessage() {} func (*RuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{4} + return fileDescriptor_aaac5994f79683e8, []int{5} } func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +215,7 @@ var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{5} + return fileDescriptor_aaac5994f79683e8, []int{6} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +243,7 @@ var 
xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{6} + return fileDescriptor_aaac5994f79683e8, []int{7} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +271,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{7} + return fileDescriptor_aaac5994f79683e8, []int{8} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +299,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{8} + return fileDescriptor_aaac5994f79683e8, []int{9} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +327,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{9} + return fileDescriptor_aaac5994f79683e8, []int{10} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -325,6 +353,7 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1.MatchCondition") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") @@ -342,79 +371,116 @@ func init() { } var fileDescriptor_aaac5994f79683e8 = []byte{ - // 1105 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x76, 0x63, 0x8f, 0xf3, 0xa7, 0x19, 0xa0, 0x35, 0xa1, 0xf2, 0x5a, 0xae, 0x84, - 0x8c, 0x80, 0xdd, 0x26, 0x94, 0x52, 0x71, 0x41, 0xd9, 0xf0, 0x47, 0x11, 0x49, 0x1b, 0x4d, 0xda, - 0x14, 0xa1, 0x1c, 0x3a, 0x5e, 0x8f, 0xed, 0x21, 0xf6, 0xce, 0x6a, 0x66, 0xd6, 0x90, 0x1b, 0x1f, - 0x81, 0xaf, 0x00, 0x9f, 0x82, 0x1b, 0xe2, 0x96, 0x63, 0x8f, 0x39, 0xa0, 0x85, 0x2c, 0x17, 0x0e, - 0x7c, 0x82, 0x9c, 0xd0, 0xcc, 0xae, 0x77, 0xfd, 0x27, 0x09, 0x56, 0x0e, 0x3d, 0xe5, 0xe6, 0xf9, - 0xbd, 0x79, 0xbf, 0x37, 0xef, 0xed, 0x7b, 0xef, 0x27, 0x83, 0x9d, 0xa3, 0xc7, 0xc2, 0xa2, 0xcc, - 0x3e, 0x0a, 0x9a, 0x84, 0x7b, 0x44, 0x12, 0x61, 0x0f, 0x88, 0xd7, 0x62, 0xdc, 0x4e, 0x0c, 0xd8, - 0xa7, 0x36, 0x6e, 0xf5, 0xa9, 0x10, 0x94, 0x79, 0x9c, 0x74, 0xa8, 0x90, 0x1c, 0x4b, 0xca, 0x3c, - 0x7b, 0xb0, 0x6e, 
0x77, 0x88, 0x47, 0x38, 0x96, 0xa4, 0x65, 0xf9, 0x9c, 0x49, 0x06, 0xef, 0xc7, - 0x4e, 0x16, 0xf6, 0xa9, 0x75, 0xa1, 0x93, 0x35, 0x58, 0x5f, 0xfb, 0xb0, 0x43, 0x65, 0x37, 0x68, - 0x5a, 0x2e, 0xeb, 0xdb, 0x1d, 0xd6, 0x61, 0xb6, 0xf6, 0x6d, 0x06, 0x6d, 0x7d, 0xd2, 0x07, 0xfd, - 0x2b, 0xe6, 0x5c, 0x7b, 0x98, 0x3d, 0xa4, 0x8f, 0xdd, 0x2e, 0xf5, 0x08, 0x3f, 0xb6, 0xfd, 0xa3, - 0x8e, 0x02, 0x84, 0xdd, 0x27, 0x12, 0x5f, 0xf0, 0x92, 0x35, 0xfb, 0x32, 0x2f, 0x1e, 0x78, 0x92, - 0xf6, 0xc9, 0x94, 0xc3, 0xa3, 0xff, 0x73, 0x10, 0x6e, 0x97, 0xf4, 0xf1, 0xa4, 0x5f, 0xfd, 0xb7, - 0x05, 0xb0, 0xb2, 0x1b, 0x48, 0x2c, 0xa9, 0xd7, 0x79, 0x41, 0x9a, 0x5d, 0xc6, 0x8e, 0x60, 0x0d, - 0xe4, 0x3d, 0xdc, 0x27, 0x15, 0xa3, 0x66, 0x34, 0x4a, 0xce, 0xe2, 0x49, 0x68, 0xce, 0x45, 0xa1, - 0x99, 0x7f, 0x82, 0xfb, 0x04, 0x69, 0x0b, 0xe4, 0x60, 0xd1, 0xed, 0x51, 0xe2, 0xc9, 0x2d, 0xe6, - 0xb5, 0x69, 0xa7, 0x32, 0x5f, 0x33, 0x1a, 0xe5, 0x8d, 0xc7, 0xd6, 0x0c, 0xf5, 0xb3, 0x92, 0x28, - 0x5b, 0x23, 0xfe, 0xce, 0x9b, 0x49, 0x8c, 0xc5, 0x51, 0x14, 0x8d, 0xc5, 0x80, 0x87, 0xa0, 0xc0, - 0x83, 0x1e, 0x11, 0x95, 0x5c, 0x2d, 0xd7, 0x28, 0x6f, 0x7c, 0x32, 0x53, 0x30, 0x14, 0xf4, 0xc8, - 0x0b, 0x2a, 0xbb, 0x4f, 0x7d, 0x12, 0x83, 0xc2, 0x59, 0x4a, 0x62, 0x15, 0x94, 0x4d, 0xa0, 0x98, - 0x14, 0xee, 0x80, 0xa5, 0x36, 0xa6, 0xbd, 0x80, 0x93, 0x3d, 0xd6, 0xa3, 0xee, 0x71, 0x25, 0xaf, - 0x93, 0x7f, 0x37, 0x0a, 0xcd, 0xa5, 0x2f, 0x47, 0x0d, 0xe7, 0xa1, 0xb9, 0x3a, 0x06, 0x3c, 0x3b, - 0xf6, 0x09, 0x1a, 0x77, 0x86, 0x9f, 0x83, 0x72, 0x1f, 0x4b, 0xb7, 0x9b, 0x70, 0x95, 0x34, 0x57, - 0x3d, 0x0a, 0xcd, 0xf2, 0x6e, 0x06, 0x9f, 0x87, 0xe6, 0xca, 0xc8, 0x51, 0xf3, 0x8c, 0xba, 0xc1, - 0x1f, 0xc0, 0xaa, 0xaa, 0xb6, 0xf0, 0xb1, 0x4b, 0xf6, 0x49, 0x8f, 0xb8, 0x92, 0xf1, 0x4a, 0x41, - 0x97, 0xfa, 0xa3, 0x91, 0xec, 0xd3, 0xef, 0x6d, 0xf9, 0x47, 0x1d, 0x05, 0x08, 0x4b, 0xb5, 0x95, - 0x4a, 0x7f, 0x07, 0x37, 0x49, 0x6f, 0xe8, 0xea, 0xbc, 0x15, 0x85, 0xe6, 0xea, 0x93, 0x49, 0x46, - 0x34, 0x1d, 0x04, 0x32, 0xb0, 0xcc, 0x9a, 0xdf, 0x11, 0x57, 0xa6, 0x61, 0xcb, 0xd7, 0x0f, 0x0b, - 0xa3, 0xd0, 0x5c, 0x7e, 0x3a, 0x46, 0x87, 0x26, 0xe8, 0x55, 0xc1, 0x04, 0x6d, 0x91, 0x2f, 0xda, - 0x6d, 0xe2, 0x4a, 0x51, 0xb9, 0x95, 0x15, 0x6c, 0x3f, 0x83, 0x55, 0xc1, 0xb2, 0xe3, 0x56, 0x0f, - 0x0b, 0x81, 0x46, 0xdd, 0xe0, 0xa7, 0x60, 0x59, 0xf5, 0x3a, 0x0b, 0xe4, 0x3e, 0x71, 0x99, 0xd7, - 0x12, 0x95, 0x85, 0x9a, 0xd1, 0x28, 0xc4, 0x2f, 0x78, 0x36, 0x66, 0x41, 0x13, 0x37, 0xe1, 0x73, - 0x70, 0x37, 0xed, 0x22, 0x44, 0x06, 0x94, 0x7c, 0x7f, 0x40, 0xb8, 0x3a, 0x88, 0x4a, 0xb1, 0x96, - 0x6b, 0x94, 0x9c, 0x77, 0xa2, 0xd0, 0xbc, 0xbb, 0x79, 0xf1, 0x15, 0x74, 0x99, 0x2f, 0x7c, 0x09, - 0x20, 0x27, 0xd4, 0x1b, 0x30, 0x57, 0xb7, 0x5f, 0xd2, 0x10, 0x40, 0xe7, 0xf7, 0x20, 0x0a, 0x4d, - 0x88, 0xa6, 0xac, 0xe7, 0xa1, 0x79, 0x67, 0x1a, 0xd5, 0xed, 0x71, 0x01, 0x57, 0xfd, 0xd4, 0x00, - 0xf7, 0x26, 0x26, 0x38, 0x9e, 0x98, 0x20, 0xee, 0x78, 0xf8, 0x12, 0x14, 0xd5, 0x87, 0x69, 0x61, - 0x89, 0xf5, 0x48, 0x97, 0x37, 0x1e, 0xcc, 0xf6, 0x19, 0xe3, 0x6f, 0xb6, 0x4b, 0x24, 0x76, 0x60, - 0x32, 0x34, 0x20, 0xc3, 0x50, 0xca, 0x0a, 0x0f, 0x40, 0x31, 0x89, 0x2c, 0x2a, 0xf3, 0x7a, 0x3a, - 0x1f, 0xce, 0x34, 0x9d, 0x13, 0xcf, 0x76, 0xf2, 0x2a, 0x0a, 0x4a, 0xb9, 0xea, 0xff, 0x18, 0xa0, - 0x76, 0x55, 0x6a, 0x3b, 0x54, 0x48, 0x78, 0x38, 0x95, 0x9e, 0x35, 0x63, 0x97, 0x52, 0x11, 0x27, - 0x77, 0x3b, 0x49, 0xae, 0x38, 0x44, 0x46, 0x52, 0x6b, 0x83, 0x02, 0x95, 0xa4, 0x3f, 0xcc, 0x6b, - 0xf3, 0x3a, 0x79, 0x8d, 0xbd, 0x39, 0xdb, 0x3f, 0xdb, 0x8a, 0x17, 0xc5, 0xf4, 0xf5, 0xdf, 0x0d, - 0x90, 0x57, 0x0b, 0x09, 0xbe, 0x0f, 0x4a, 
0xd8, 0xa7, 0x5f, 0x71, 0x16, 0xf8, 0xa2, 0x62, 0xe8, - 0xce, 0x5b, 0x8a, 0x42, 0xb3, 0xb4, 0xb9, 0xb7, 0x1d, 0x83, 0x28, 0xb3, 0xc3, 0x75, 0x50, 0xc6, - 0x3e, 0x4d, 0x1b, 0x75, 0x5e, 0x5f, 0x5f, 0x51, 0x63, 0xb3, 0xb9, 0xb7, 0x9d, 0x36, 0xe7, 0xe8, - 0x1d, 0xc5, 0xcf, 0x89, 0x60, 0x01, 0x77, 0x93, 0x55, 0x9a, 0xf0, 0xa3, 0x21, 0x88, 0x32, 0x3b, - 0xfc, 0x00, 0x14, 0x84, 0xcb, 0x7c, 0x92, 0x6c, 0xc3, 0x3b, 0xea, 0xd9, 0xfb, 0x0a, 0x38, 0x0f, - 0xcd, 0x92, 0xfe, 0xa1, 0xdb, 0x32, 0xbe, 0x54, 0xff, 0xc5, 0x00, 0x70, 0x7a, 0xe1, 0xc2, 0xcf, - 0x00, 0x60, 0xe9, 0x29, 0x49, 0xc9, 0xd4, 0xbd, 0x94, 0xa2, 0xe7, 0xa1, 0xb9, 0x94, 0x9e, 0x34, - 0xe5, 0x88, 0x0b, 0xfc, 0x1a, 0xe4, 0xd5, 0x92, 0x4e, 0x54, 0xe6, 0xbd, 0x99, 0x17, 0x7f, 0x26, - 0x5d, 0xea, 0x84, 0x34, 0x49, 0xfd, 0x67, 0x03, 0xdc, 0xde, 0x27, 0x7c, 0x40, 0x5d, 0x82, 0x48, - 0x9b, 0x70, 0xe2, 0xb9, 0x04, 0xda, 0xa0, 0x94, 0x2e, 0xc1, 0x44, 0xf6, 0x56, 0x13, 0xdf, 0x52, - 0xba, 0x30, 0x51, 0x76, 0x27, 0x95, 0xc8, 0xf9, 0x4b, 0x25, 0xf2, 0x1e, 0xc8, 0xfb, 0x58, 0x76, - 0x2b, 0x39, 0x7d, 0xa3, 0xa8, 0xac, 0x7b, 0x58, 0x76, 0x91, 0x46, 0xb5, 0x95, 0x71, 0xa9, 0xeb, - 0x5a, 0x48, 0xac, 0x8c, 0x4b, 0xa4, 0xd1, 0xfa, 0x9f, 0xb7, 0xc0, 0xea, 0x01, 0xee, 0xd1, 0xd6, - 0x8d, 0x2c, 0xdf, 0xc8, 0xf2, 0x95, 0xb2, 0x0c, 0x6e, 0x64, 0xf9, 0x3a, 0xb2, 0x5c, 0xff, 0xc3, - 0x00, 0xd5, 0xa9, 0x09, 0x7b, 0xdd, 0xb2, 0xf9, 0xcd, 0x94, 0x6c, 0x3e, 0x9a, 0x69, 0x7a, 0xa6, - 0x1e, 0x3e, 0x25, 0x9c, 0xff, 0x1a, 0xa0, 0x7e, 0x75, 0x7a, 0xaf, 0x41, 0x3a, 0xbb, 0xe3, 0xd2, - 0xb9, 0x75, 0xbd, 0xdc, 0x66, 0x11, 0xcf, 0x5f, 0x0d, 0xf0, 0xc6, 0x05, 0xfb, 0x0b, 0xbe, 0x0d, - 0x72, 0x01, 0xef, 0x25, 0x2b, 0x78, 0x21, 0x0a, 0xcd, 0xdc, 0x73, 0xb4, 0x83, 0x14, 0x06, 0x0f, - 0xc1, 0x82, 0x88, 0x55, 0x20, 0xc9, 0xfc, 0xe3, 0x99, 0x9e, 0x37, 0xa9, 0x1c, 0x4e, 0x39, 0x0a, - 0xcd, 0x85, 0x21, 0x3a, 0xa4, 0x84, 0x0d, 0x50, 0x74, 0xb1, 0x13, 0x78, 0xad, 0x44, 0xb5, 0x16, - 0x9d, 0x45, 0x55, 0xa4, 0xad, 0xcd, 0x18, 0x43, 0xa9, 0xd5, 0xd9, 0x3e, 0x39, 0xab, 0xce, 0xbd, - 0x3a, 0xab, 0xce, 0x9d, 0x9e, 0x55, 0xe7, 0x7e, 0x8c, 0xaa, 0xc6, 0x49, 0x54, 0x35, 0x5e, 0x45, - 0x55, 0xe3, 0x34, 0xaa, 0x1a, 0x7f, 0x45, 0x55, 0xe3, 0xa7, 0xbf, 0xab, 0x73, 0xdf, 0xde, 0x9f, - 0xe1, 0xdf, 0xec, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x43, 0x44, 0x86, 0xf5, 0x0c, 0x0f, 0x00, + // 1169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xce, 0xc6, 0x36, 0xb1, 0xc7, 0x4e, 0xd2, 0x0c, 0xd0, 0x2e, 0xa5, 0xf2, 0x5a, 0xae, 0x84, + 0x82, 0x00, 0x6f, 0x9b, 0x96, 0x52, 0x71, 0x41, 0xb1, 0x29, 0x28, 0x22, 0x69, 0xa3, 0x49, 0x3f, + 0x10, 0xea, 0xa1, 0xe3, 0xf5, 0xd8, 0x1e, 0x62, 0xef, 0xac, 0x66, 0x66, 0x4d, 0x7b, 0xe3, 0x27, + 0xf0, 0x17, 0xe0, 0x4f, 0xc0, 0x95, 0x5b, 0x8f, 0xbd, 0x91, 0x03, 0x5a, 0x91, 0xe5, 0xc2, 0x81, + 0x5f, 0x90, 0x13, 0x9a, 0xd9, 0xf5, 0xae, 0xbf, 0x12, 0x56, 0x39, 0xe4, 0x94, 0x5b, 0xe6, 0x79, + 0xdf, 0xf7, 0x79, 0xe7, 0x19, 0xbf, 0x1f, 0xab, 0x80, 0xdd, 0xc3, 0xfb, 0xa2, 0x41, 0x99, 0x7d, + 0xe8, 0xb7, 0x09, 0x77, 0x89, 0x24, 0xc2, 0x1e, 0x11, 0xb7, 0xc3, 0xb8, 0x1d, 0x1b, 0xb0, 0x47, + 0x6d, 0xdc, 0x19, 0x52, 0x21, 0x28, 0x73, 0x39, 0xe9, 0x51, 0x21, 0x39, 0x96, 0x94, 0xb9, 0xf6, + 0xe8, 0xb6, 0xdd, 0x23, 0x2e, 0xe1, 0x58, 0x92, 0x4e, 0xc3, 0xe3, 0x4c, 0x32, 0x78, 0x33, 0x0a, + 0x6a, 0x60, 0x8f, 0x36, 0x16, 0x06, 0x35, 0x46, 0xb7, 0xaf, 0x7f, 0xd2, 0xa3, 0xb2, 0xef, 0xb7, + 0x1b, 0x0e, 0x1b, 0xda, 0x3d, 0xd6, 0x63, 0xb6, 0x8e, 0x6d, 0xfb, 0x5d, 0x7d, 0xd2, 0x07, 0xfd, + 0x57, 0xc4, 0x79, 
0xfd, 0x6e, 0x7a, 0x91, 0x21, 0x76, 0xfa, 0xd4, 0x25, 0xfc, 0x95, 0xed, 0x1d, + 0xf6, 0x14, 0x20, 0xec, 0x21, 0x91, 0x78, 0xc1, 0x4d, 0xae, 0xdb, 0xa7, 0x45, 0x71, 0xdf, 0x95, + 0x74, 0x48, 0xe6, 0x02, 0xee, 0xfd, 0x5f, 0x80, 0x70, 0xfa, 0x64, 0x88, 0x67, 0xe3, 0xea, 0x5d, + 0xb0, 0xb6, 0x87, 0xa5, 0xd3, 0x6f, 0x31, 0xb7, 0x43, 0x95, 0x44, 0x58, 0x03, 0x79, 0x17, 0x0f, + 0x89, 0x69, 0xd4, 0x8c, 0xcd, 0x52, 0xb3, 0xf2, 0x3a, 0xb0, 0x96, 0xc2, 0xc0, 0xca, 0x3f, 0xc4, + 0x43, 0x82, 0xb4, 0x05, 0x6e, 0x01, 0x40, 0x5e, 0x7a, 0x9c, 0xe8, 0xe7, 0x31, 0x97, 0xb5, 0x1f, + 0x8c, 0xfd, 0xc0, 0x83, 0xc4, 0x82, 0x26, 0xbc, 0xea, 0xbf, 0x16, 0xc1, 0xfa, 0x9e, 0x2f, 0xb1, + 0xa4, 0x6e, 0xef, 0x19, 0x69, 0xf7, 0x19, 0x3b, 0xcc, 0x90, 0x89, 0x83, 0x8a, 0x33, 0xa0, 0xc4, + 0x95, 0x2d, 0xe6, 0x76, 0x69, 0x4f, 0xe7, 0x2a, 0x6f, 0xdd, 0x6f, 0x64, 0xf8, 0x9d, 0x1a, 0x71, + 0x96, 0xd6, 0x44, 0x7c, 0xf3, 0x9d, 0x38, 0x47, 0x65, 0x12, 0x45, 0x53, 0x39, 0xe0, 0x73, 0x50, + 0xe0, 0xfe, 0x80, 0x08, 0x33, 0x57, 0xcb, 0x6d, 0x96, 0xb7, 0x3e, 0xcb, 0x94, 0x0c, 0xf9, 0x03, + 0xf2, 0x8c, 0xca, 0xfe, 0x23, 0x8f, 0x44, 0xa0, 0x68, 0xae, 0xc6, 0xb9, 0x0a, 0xca, 0x26, 0x50, + 0x44, 0x0a, 0x77, 0xc1, 0x6a, 0x17, 0xd3, 0x81, 0xcf, 0xc9, 0x3e, 0x1b, 0x50, 0xe7, 0x95, 0x99, + 0xd7, 0xe2, 0x3f, 0x08, 0x03, 0x6b, 0xf5, 0xab, 0x49, 0xc3, 0x49, 0x60, 0x6d, 0x4c, 0x01, 0x8f, + 0x5f, 0x79, 0x04, 0x4d, 0x07, 0xc3, 0x2f, 0x41, 0x79, 0xa8, 0x7e, 0xbd, 0x98, 0xab, 0xa4, 0xb9, + 0xea, 0x61, 0x60, 0x95, 0xf7, 0x52, 0xf8, 0x24, 0xb0, 0xd6, 0x27, 0x8e, 0x9a, 0x67, 0x32, 0x0c, + 0xbe, 0x04, 0x1b, 0xea, 0xb5, 0x85, 0x87, 0x1d, 0x72, 0x40, 0x06, 0xc4, 0x91, 0x8c, 0x9b, 0x05, + 0xfd, 0xd4, 0x77, 0x26, 0xd4, 0x27, 0x75, 0xd5, 0xf0, 0x0e, 0x7b, 0x0a, 0x10, 0x0d, 0x55, 0xbe, + 0x4a, 0xfe, 0x2e, 0x6e, 0x93, 0xc1, 0x38, 0xb4, 0xf9, 0x6e, 0x18, 0x58, 0x1b, 0x0f, 0x67, 0x19, + 0xd1, 0x7c, 0x12, 0xc8, 0xc0, 0x1a, 0x6b, 0x7f, 0x4f, 0x1c, 0x99, 0xa4, 0x2d, 0x9f, 0x3f, 0x2d, + 0x0c, 0x03, 0x6b, 0xed, 0xd1, 0x14, 0x1d, 0x9a, 0xa1, 0x57, 0x0f, 0x26, 0x68, 0x87, 0x3c, 0xe8, + 0x76, 0x89, 0x23, 0x85, 0xf9, 0x56, 0xfa, 0x60, 0x07, 0x29, 0xac, 0x1e, 0x2c, 0x3d, 0xb6, 0x06, + 0x58, 0x08, 0x34, 0x19, 0x06, 0x3f, 0x07, 0x6b, 0xaa, 0xa7, 0x98, 0x2f, 0x0f, 0x88, 0xc3, 0xdc, + 0x8e, 0x30, 0x57, 0x6a, 0xc6, 0x66, 0x21, 0xba, 0xc1, 0xe3, 0x29, 0x0b, 0x9a, 0xf1, 0x84, 0x4f, + 0xc0, 0xb5, 0xa4, 0x8a, 0x10, 0x19, 0x51, 0xf2, 0xc3, 0x53, 0xc2, 0xd5, 0x41, 0x98, 0xc5, 0x5a, + 0x6e, 0xb3, 0xd4, 0x7c, 0x3f, 0x0c, 0xac, 0x6b, 0xdb, 0x8b, 0x5d, 0xd0, 0x69, 0xb1, 0xf0, 0x05, + 0x80, 0x9c, 0x50, 0x77, 0xc4, 0x1c, 0x5d, 0x7e, 0x71, 0x41, 0x00, 0xad, 0xef, 0x56, 0x18, 0x58, + 0x10, 0xcd, 0x59, 0x4f, 0x02, 0xeb, 0xea, 0x3c, 0xaa, 0xcb, 0x63, 0x01, 0x17, 0x1c, 0x81, 0xf5, + 0xe1, 0xd4, 0xa4, 0x10, 0x66, 0x45, 0x77, 0xc8, 0x9d, 0x4c, 0x1d, 0x32, 0x3d, 0x65, 0x9a, 0xd7, + 0xe2, 0xee, 0x58, 0x9f, 0xc6, 0x05, 0x9a, 0x4d, 0x52, 0x3f, 0x32, 0xc0, 0x8d, 0x99, 0xc9, 0x11, + 0x75, 0xaa, 0x1f, 0x91, 0xc3, 0x17, 0xa0, 0xa8, 0x0a, 0xa2, 0x83, 0x25, 0xd6, 0xa3, 0xa4, 0xbc, + 0x75, 0x2b, 0x5b, 0xf9, 0x44, 0xb5, 0xb2, 0x47, 0x24, 0x4e, 0xc7, 0x57, 0x8a, 0xa1, 0x84, 0x15, + 0x3e, 0x05, 0xc5, 0x38, 0xb3, 0x30, 0x97, 0xb5, 0xe6, 0xbb, 0xd9, 0x34, 0x4f, 0x5f, 0xbb, 0x99, + 0x57, 0x59, 0x50, 0xc2, 0x55, 0xff, 0xc7, 0x00, 0xb5, 0xb3, 0xa4, 0xed, 0x52, 0x21, 0xe1, 0xf3, + 0x39, 0x79, 0x8d, 0x8c, 0xdd, 0x41, 0x45, 0x24, 0xee, 0x4a, 0x2c, 0xae, 0x38, 0x46, 0x26, 0xa4, + 0x75, 0x41, 0x81, 0x4a, 0x32, 0x1c, 0xeb, 0xda, 0x3e, 0x8f, 0xae, 0xa9, 0x3b, 0xa7, 0x73, 0x6f, + 0x47, 0xf1, 0xa2, 0x88, 0xbe, 0xfe, 0xbb, 
0x01, 0xf2, 0x6a, 0x10, 0xc2, 0x8f, 0x40, 0x09, 0x7b, + 0xf4, 0x6b, 0xce, 0x7c, 0x4f, 0x98, 0x86, 0xae, 0xf8, 0xd5, 0x30, 0xb0, 0x4a, 0xdb, 0xfb, 0x3b, + 0x11, 0x88, 0x52, 0x3b, 0xbc, 0x0d, 0xca, 0xd8, 0xa3, 0x49, 0x83, 0x2c, 0x6b, 0xf7, 0x75, 0xd5, + 0xae, 0xdb, 0xfb, 0x3b, 0x49, 0x53, 0x4c, 0xfa, 0x28, 0x7e, 0x4e, 0x04, 0xf3, 0xb9, 0x13, 0x8f, + 0xf0, 0x98, 0x1f, 0x8d, 0x41, 0x94, 0xda, 0xe1, 0xc7, 0xa0, 0x20, 0x1c, 0xe6, 0x91, 0x78, 0x0a, + 0x5f, 0x55, 0xd7, 0x3e, 0x50, 0xc0, 0x49, 0x60, 0x95, 0xf4, 0x1f, 0xba, 0x1d, 0x22, 0xa7, 0xfa, + 0x2f, 0x06, 0x80, 0xf3, 0x83, 0x1e, 0x7e, 0x01, 0x00, 0x4b, 0x4e, 0xb1, 0x24, 0x4b, 0xd7, 0x52, + 0x82, 0x9e, 0x04, 0xd6, 0x6a, 0x72, 0xd2, 0x94, 0x13, 0x21, 0xf0, 0x1b, 0x90, 0x57, 0xcb, 0x21, + 0xde, 0x6e, 0x1f, 0x66, 0x5e, 0x38, 0xe9, 0xca, 0x54, 0x27, 0xa4, 0x49, 0xea, 0x3f, 0x1b, 0xe0, + 0xca, 0x01, 0xe1, 0x23, 0xea, 0x10, 0x44, 0xba, 0x84, 0x13, 0xd7, 0x21, 0xd0, 0x06, 0xa5, 0x64, + 0xf8, 0xc6, 0xeb, 0x76, 0x23, 0x8e, 0x2d, 0x25, 0x83, 0x1a, 0xa5, 0x3e, 0xc9, 0x6a, 0x5e, 0x3e, + 0x75, 0x35, 0xdf, 0x00, 0x79, 0x0f, 0xcb, 0xbe, 0x99, 0xd3, 0x1e, 0x45, 0x65, 0xdd, 0xc7, 0xb2, + 0x8f, 0x34, 0xaa, 0xad, 0x8c, 0x4b, 0xfd, 0xae, 0x85, 0xd8, 0xca, 0xb8, 0x44, 0x1a, 0xad, 0xff, + 0xb1, 0x02, 0x36, 0x9e, 0xe2, 0x01, 0xed, 0x5c, 0x7e, 0x0e, 0x5c, 0x7e, 0x0e, 0x9c, 0xf9, 0x39, + 0x00, 0x2e, 0x3f, 0x07, 0xce, 0xf5, 0x39, 0xb0, 0x60, 0x59, 0x97, 0x2f, 0x62, 0x59, 0xff, 0x69, + 0x80, 0xea, 0x5c, 0x67, 0x5f, 0xf4, 0xba, 0xfe, 0x76, 0x6e, 0x5d, 0xdf, 0xcb, 0xa4, 0x7a, 0xee, + 0xe2, 0x73, 0x0b, 0xfb, 0x5f, 0x03, 0xd4, 0xcf, 0x96, 0x77, 0x01, 0x2b, 0xbb, 0x3f, 0xbd, 0xb2, + 0x5b, 0xe7, 0xd3, 0x96, 0x65, 0x69, 0xff, 0x66, 0x80, 0xb7, 0x17, 0xcc, 0x4d, 0xf8, 0x1e, 0xc8, + 0xf9, 0x7c, 0x10, 0x8f, 0xfe, 0x95, 0x30, 0xb0, 0x72, 0x4f, 0xd0, 0x2e, 0x52, 0x18, 0x7c, 0x0e, + 0x56, 0x44, 0xb4, 0x7d, 0x62, 0xe5, 0x9f, 0x66, 0xba, 0xde, 0xec, 0xc6, 0x6a, 0x96, 0xc3, 0xc0, + 0x5a, 0x19, 0xa3, 0x63, 0x4a, 0xb8, 0x09, 0x8a, 0x0e, 0x6e, 0xfa, 0x6e, 0x27, 0xde, 0x96, 0x95, + 0x66, 0x45, 0x3d, 0x52, 0x6b, 0x3b, 0xc2, 0x50, 0x62, 0x6d, 0xee, 0xbc, 0x3e, 0xae, 0x2e, 0xbd, + 0x39, 0xae, 0x2e, 0x1d, 0x1d, 0x57, 0x97, 0x7e, 0x0c, 0xab, 0xc6, 0xeb, 0xb0, 0x6a, 0xbc, 0x09, + 0xab, 0xc6, 0x51, 0x58, 0x35, 0xfe, 0x0a, 0xab, 0xc6, 0x4f, 0x7f, 0x57, 0x97, 0xbe, 0xbb, 0x99, + 0xe1, 0xbf, 0x04, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xe1, 0x3a, 0x73, 0x64, 0x10, 0x00, 0x00, } +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -435,6 +501,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -791,6 +871,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -1036,6 +1130,19 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 @@ -1085,6 +1192,12 @@ func (m *MutatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1235,6 +1348,12 @@ func (m *ValidatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1299,6 +1418,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MutatingWebhook) String() string { if this == nil { return "nil" @@ -1308,6 +1438,11 @@ func (this *MutatingWebhook) String() string { repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&MutatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1320,6 +1455,7 @@ func (this *MutatingWebhook) String() string { `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1402,6 +1538,11 @@ func (this *ValidatingWebhook) 
String() string { repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&ValidatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1413,6 +1554,7 @@ func (this *ValidatingWebhook) String() string { `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1469,6 +1611,120 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1853,6 +2109,40 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2920,6 +3210,40 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index aa266a2a50..cdf1f47655 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -28,6 +28,35 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1"; +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. 
+ // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -173,6 +202,28 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. @@ -409,6 +460,28 @@ message ValidatingWebhook { // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. 
+ // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go index e74b276f65..74f17d54a2 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -307,6 +307,28 @@ type ValidatingWebhook struct { // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,opt,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -454,6 +476,28 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. 
+ // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,opt,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. @@ -563,3 +607,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go index ba92729c3c..ce306b307a 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. 
Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +50,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. 
* to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -111,6 +122,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (ValidatingWebhook) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go index cff7377a55..b956099138 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go @@ -26,6 +26,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -77,6 +93,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -286,6 +307,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index a00f532d26..7465350263 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -45,10 +45,94 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } +func (*AuditAnnotation) ProtoMessage() {} +func (*AuditAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{0} +} +func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditAnnotation.Merge(m, src) +} +func (m *AuditAnnotation) XXX_Size() int { + return m.Size() +} +func (m *AuditAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_AuditAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo + +func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } +func (*ExpressionWarning) ProtoMessage() {} +func (*ExpressionWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{1} +} +func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpressionWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpressionWarning.Merge(m, src) +} +func (m *ExpressionWarning) XXX_Size() int { + return m.Size() +} +func (m *ExpressionWarning) XXX_DiscardUnknown() { + xxx_messageInfo_ExpressionWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo + +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{2} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MatchResources) Reset() { *m = MatchResources{} } func (*MatchResources) ProtoMessage() {} func (*MatchResources) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{0} + return fileDescriptor_c3be8d256e3ae3cf, []int{3} } func (m *MatchResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +160,7 @@ var xxx_messageInfo_MatchResources proto.InternalMessageInfo func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } func (*NamedRuleWithOperations) ProtoMessage() {} func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{1} + return fileDescriptor_c3be8d256e3ae3cf, []int{4} } func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +188,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo func (m 
*ParamKind) Reset() { *m = ParamKind{} } func (*ParamKind) ProtoMessage() {} func (*ParamKind) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{2} + return fileDescriptor_c3be8d256e3ae3cf, []int{5} } func (m *ParamKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +216,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo func (m *ParamRef) Reset() { *m = ParamRef{} } func (*ParamRef) ProtoMessage() {} func (*ParamRef) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{3} + return fileDescriptor_c3be8d256e3ae3cf, []int{6} } func (m *ParamRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -157,10 +241,38 @@ func (m *ParamRef) XXX_DiscardUnknown() { var xxx_messageInfo_ParamRef proto.InternalMessageInfo +func (m *TypeChecking) Reset() { *m = TypeChecking{} } +func (*TypeChecking) ProtoMessage() {} +func (*TypeChecking) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{7} +} +func (m *TypeChecking) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeChecking) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeChecking.Merge(m, src) +} +func (m *TypeChecking) XXX_Size() int { + return m.Size() +} +func (m *TypeChecking) XXX_DiscardUnknown() { + xxx_messageInfo_TypeChecking.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeChecking proto.InternalMessageInfo + func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } func (*ValidatingAdmissionPolicy) ProtoMessage() {} func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{4} + return fileDescriptor_c3be8d256e3ae3cf, []int{8} } func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +300,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{5} + return fileDescriptor_c3be8d256e3ae3cf, []int{9} } func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +328,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{6} + return fileDescriptor_c3be8d256e3ae3cf, []int{10} } func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +356,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{7} + return fileDescriptor_c3be8d256e3ae3cf, []int{11} } func (m 
*ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +384,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } func (*ValidatingAdmissionPolicyList) ProtoMessage() {} func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{8} + return fileDescriptor_c3be8d256e3ae3cf, []int{12} } func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +412,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{9} + return fileDescriptor_c3be8d256e3ae3cf, []int{13} } func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -325,10 +437,38 @@ func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo +func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } +func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} +func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_c3be8d256e3ae3cf, []int{14} +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo + func (m *Validation) Reset() { *m = Validation{} } func (*Validation) ProtoMessage() {} func (*Validation) Descriptor() ([]byte, []int) { - return fileDescriptor_c3be8d256e3ae3cf, []int{10} + return fileDescriptor_c3be8d256e3ae3cf, []int{15} } func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,16 +494,21 @@ func (m *Validation) XXX_DiscardUnknown() { var xxx_messageInfo_Validation proto.InternalMessageInfo func init() { + proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation") + proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning") + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchCondition") proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchResources") proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.NamedRuleWithOperations") proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamKind") proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamRef") + proto.RegisterType((*TypeChecking)(nil), 
"k8s.io.api.admissionregistration.v1alpha1.TypeChecking") proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy") proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding") proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingList") proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec") proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyList") proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec") + proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus") proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Validation") } @@ -372,73 +517,194 @@ func init() { } var fileDescriptor_c3be8d256e3ae3cf = []byte{ - // 1054 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcd, 0x6e, 0x1c, 0x45, - 0x10, 0xf6, 0xc4, 0x9b, 0xc4, 0xdb, 0x1b, 0x3b, 0x76, 0xe3, 0x88, 0xc5, 0x82, 0xdd, 0xd5, 0x2a, - 0x42, 0xf6, 0x81, 0x19, 0xec, 0x04, 0x02, 0x27, 0x94, 0x21, 0x41, 0x44, 0xb1, 0x63, 0xab, 0x8d, - 0x12, 0x09, 0x11, 0x89, 0xf6, 0x4c, 0x7b, 0xb6, 0xb3, 0x3b, 0x3f, 0x4c, 0xf7, 0x58, 0xb6, 0x38, - 0x80, 0xc4, 0x0b, 0x70, 0xe0, 0x41, 0x38, 0x71, 0xe1, 0x05, 0x7c, 0xcc, 0xd1, 0x5c, 0x46, 0x78, - 0xb8, 0xc0, 0x0b, 0x80, 0xe4, 0x13, 0xea, 0x9e, 0x9e, 0xbf, 0xfd, 0xc1, 0xeb, 0x60, 0xe5, 0xb6, - 0x5d, 0x3f, 0xdf, 0x57, 0x55, 0x5d, 0x35, 0xd5, 0x0b, 0x50, 0xff, 0x23, 0xa6, 0x53, 0xdf, 0xe8, - 0x47, 0x7b, 0x24, 0xf4, 0x08, 0x27, 0xcc, 0x38, 0x20, 0x9e, 0xed, 0x87, 0x86, 0x52, 0xe0, 0x80, - 0x1a, 0xd8, 0x76, 0x29, 0x63, 0xd4, 0xf7, 0x42, 0xe2, 0x50, 0xc6, 0x43, 0xcc, 0xa9, 0xef, 0x19, - 0x07, 0xeb, 0x78, 0x10, 0xf4, 0xf0, 0xba, 0xe1, 0x10, 0x8f, 0x84, 0x98, 0x13, 0x5b, 0x0f, 0x42, - 0x9f, 0xfb, 0x70, 0x2d, 0x75, 0xd5, 0x71, 0x40, 0xf5, 0xb1, 0xae, 0x7a, 0xe6, 0xba, 0xf2, 0x9e, - 0x43, 0x79, 0x2f, 0xda, 0xd3, 0x2d, 0xdf, 0x35, 0x1c, 0xdf, 0xf1, 0x0d, 0x89, 0xb0, 0x17, 0xed, - 0xcb, 0x93, 0x3c, 0xc8, 0x5f, 0x29, 0xf2, 0xca, 0x9d, 0x29, 0x82, 0x1a, 0x0e, 0x67, 0xe5, 0x6e, - 0xe1, 0xe4, 0x62, 0xab, 0x47, 0x3d, 0x12, 0x1e, 0x19, 0x41, 0xdf, 0x11, 0x02, 0x66, 0xb8, 0x84, - 0xe3, 0x71, 0x5e, 0xc6, 0x24, 0xaf, 0x30, 0xf2, 0x38, 0x75, 0xc9, 0x88, 0xc3, 0x87, 0xe7, 0x39, - 0x30, 0xab, 0x47, 0x5c, 0x3c, 0xec, 0xd7, 0xfd, 0xad, 0x06, 0x16, 0xb6, 0x30, 0xb7, 0x7a, 0x88, - 0x30, 0x3f, 0x0a, 0x2d, 0xc2, 0xe0, 0x21, 0x58, 0xf2, 0xb0, 0x4b, 0x58, 0x80, 0x2d, 0xb2, 0x4b, - 0x06, 0xc4, 0xe2, 0x7e, 0xd8, 0xd4, 0x3a, 0xda, 0x6a, 0x63, 0xe3, 0x8e, 0x5e, 0x14, 0x37, 0xa7, - 0xd1, 0x83, 0xbe, 0x23, 0x04, 0x4c, 0x17, 0xd9, 0xe8, 0x07, 0xeb, 0xfa, 0x26, 0xde, 0x23, 0x83, - 0xcc, 0xd5, 0xbc, 0x95, 0xc4, 0xed, 0xa5, 0x27, 0xc3, 0x88, 0x68, 0x94, 0x04, 0xfa, 0x60, 0xc1, - 0xdf, 0x7b, 0x41, 0x2c, 0x9e, 0xd3, 0x5e, 0x79, 0x75, 0x5a, 0x98, 0xc4, 0xed, 0x85, 0xed, 0x0a, - 0x1c, 0x1a, 0x82, 0x87, 0xdf, 0x81, 0xf9, 0x50, 0xe5, 0x8d, 0xa2, 0x01, 0x61, 0xcd, 0xd9, 0xce, - 0xec, 0x6a, 0x63, 0xc3, 0xd4, 0xa7, 0xee, 0x21, 0x5d, 0x24, 0x66, 0x0b, 0xe7, 0x67, 0x94, 0xf7, - 0xb6, 0x03, 0x92, 0xea, 0x99, 0x79, 0xeb, 
0x38, 0x6e, 0xcf, 0x24, 0x71, 0x7b, 0x1e, 0x95, 0x09, - 0x50, 0x95, 0x0f, 0xfe, 0xa4, 0x81, 0x65, 0x72, 0x68, 0x0d, 0x22, 0x9b, 0x54, 0xec, 0x9a, 0xb5, - 0x4b, 0x0b, 0xe4, 0x6d, 0x15, 0xc8, 0xf2, 0xc3, 0x31, 0x3c, 0x68, 0x2c, 0x3b, 0x7c, 0x00, 0x1a, - 0xae, 0x68, 0x8a, 0x1d, 0x7f, 0x40, 0xad, 0xa3, 0xe6, 0xf5, 0x8e, 0xb6, 0x5a, 0x37, 0xbb, 0x49, - 0xdc, 0x6e, 0x6c, 0x15, 0xe2, 0xb3, 0xb8, 0x7d, 0xb3, 0x74, 0xfc, 0xe2, 0x28, 0x20, 0xa8, 0xec, - 0xd6, 0x3d, 0xd1, 0xc0, 0x9b, 0x13, 0xa2, 0x82, 0xf7, 0x8a, 0xca, 0xcb, 0xd6, 0x68, 0x6a, 0x9d, - 0xd9, 0xd5, 0xba, 0xb9, 0x54, 0xae, 0x98, 0x54, 0xa0, 0xaa, 0x1d, 0xfc, 0x41, 0x03, 0x30, 0x1c, - 0xc1, 0x53, 0x8d, 0x72, 0x6f, 0x9a, 0x7a, 0xe9, 0x63, 0x8a, 0xb4, 0xa2, 0x8a, 0x04, 0x47, 0x75, - 0x68, 0x0c, 0x5d, 0x17, 0x83, 0xfa, 0x0e, 0x0e, 0xb1, 0xfb, 0x98, 0x7a, 0x36, 0xdc, 0x00, 0x00, - 0x07, 0xf4, 0x29, 0x09, 0x05, 0x99, 0x9c, 0x94, 0xba, 0x09, 0x15, 0x20, 0xb8, 0xbf, 0xf3, 0x48, - 0x69, 0x50, 0xc9, 0x0a, 0x76, 0x40, 0xad, 0x4f, 0x3d, 0x5b, 0xc6, 0x5d, 0x37, 0x6f, 0x28, 0xeb, - 0x9a, 0xc0, 0x43, 0x52, 0xd3, 0x7d, 0x0e, 0xe6, 0x24, 0x05, 0x22, 0xfb, 0xc2, 0x5a, 0x4c, 0x8b, - 0xc2, 0xce, 0xad, 0x45, 0x45, 0x90, 0xd4, 0x40, 0x03, 0xd4, 0xf3, 0x79, 0x52, 0xa0, 0x4b, 0xca, - 0xac, 0x9e, 0xcf, 0x1e, 0x2a, 0x6c, 0xba, 0x7f, 0x69, 0xe0, 0xad, 0xa7, 0x78, 0x40, 0x6d, 0xcc, - 0xa9, 0xe7, 0xdc, 0xcf, 0x6a, 0x95, 0x5e, 0x1d, 0xfc, 0x1a, 0xcc, 0x89, 0xa9, 0xb2, 0x31, 0xc7, - 0x6a, 0xf4, 0xdf, 0x9f, 0x6e, 0x06, 0xd3, 0x81, 0xdb, 0x22, 0x1c, 0x17, 0x25, 0x28, 0x64, 0x28, - 0x47, 0x85, 0x2f, 0x40, 0x8d, 0x05, 0xc4, 0x52, 0x17, 0xf7, 0xf9, 0x05, 0x1a, 0x7d, 0x62, 0xd4, - 0xbb, 0x01, 0xb1, 0x8a, 0xe2, 0x88, 0x13, 0x92, 0x1c, 0xdd, 0x7f, 0x34, 0xd0, 0x99, 0xe8, 0x65, - 0x52, 0xcf, 0xa6, 0x9e, 0xf3, 0x1a, 0x52, 0xfe, 0xa6, 0x92, 0xf2, 0xf6, 0x65, 0xa4, 0xac, 0x82, - 0x9f, 0x98, 0xf9, 0xdf, 0x1a, 0xb8, 0x7d, 0x9e, 0xf3, 0x26, 0x65, 0x1c, 0x7e, 0x35, 0x92, 0xbd, - 0x3e, 0xe5, 0x47, 0x97, 0xb2, 0x34, 0xf7, 0x45, 0x45, 0x3f, 0x97, 0x49, 0x4a, 0x99, 0x07, 0xe0, - 0x2a, 0xe5, 0xc4, 0x15, 0x63, 0x2a, 0x3e, 0x6b, 0x8f, 0x2f, 0x31, 0x75, 0x73, 0x5e, 0xf1, 0x5e, - 0x7d, 0x24, 0x18, 0x50, 0x4a, 0xd4, 0xfd, 0xf9, 0xca, 0xf9, 0x89, 0x8b, 0x3a, 0x89, 0xe1, 0x0d, - 0xa4, 0xf0, 0x49, 0x31, 0x60, 0xf9, 0x35, 0xee, 0xe4, 0x1a, 0x54, 0xb2, 0x82, 0xcf, 0xc1, 0x5c, - 0xa0, 0x46, 0x73, 0xcc, 0x86, 0x3a, 0x2f, 0xa3, 0x6c, 0xaa, 0xcd, 0x1b, 0xa2, 0x5a, 0xd9, 0x09, - 0xe5, 0x90, 0x30, 0x02, 0x0b, 0x6e, 0x65, 0x25, 0x37, 0x67, 0x25, 0xc9, 0xc7, 0x17, 0x20, 0xa9, - 0xee, 0xf4, 0x74, 0x19, 0x56, 0x65, 0x68, 0x88, 0xa4, 0xfb, 0xa7, 0x06, 0xde, 0x99, 0x58, 0xb2, - 0xd7, 0xd0, 0x24, 0xb4, 0xda, 0x24, 0x0f, 0x2e, 0xa5, 0x49, 0xc6, 0x77, 0xc7, 0xaf, 0xb3, 0xff, - 0x91, 0xaa, 0x6c, 0x0b, 0x0c, 0xea, 0x41, 0xf6, 0x81, 0x57, 0xb9, 0xde, 0xbd, 0xe8, 0x1d, 0x0b, - 0x5f, 0x73, 0x5e, 0x7c, 0x81, 0xf3, 0x23, 0x2a, 0x50, 0xe1, 0xb7, 0x60, 0x51, 0xde, 0xc0, 0xa7, - 0xbe, 0x27, 0x00, 0xa8, 0xc7, 0xb3, 0x35, 0xf6, 0x3f, 0x2e, 0x7a, 0x39, 0x89, 0xdb, 0x8b, 0x5b, - 0x43, 0xb0, 0x68, 0x84, 0x08, 0x0e, 0x40, 0xe3, 0x40, 0x15, 0x40, 0xac, 0xcf, 0xf4, 0xdd, 0xf3, - 0xc1, 0x2b, 0x94, 0xdc, 0xf7, 0xcc, 0x37, 0x54, 0x8d, 0x1b, 0x85, 0x8c, 0xa1, 0x32, 0x3c, 0xdc, - 0x04, 0xf3, 0xfb, 0x98, 0x0e, 0xa2, 0x90, 0xa8, 0x17, 0x45, 0x4d, 0xce, 0xd9, 0xbb, 0x62, 0xdb, - 0x7f, 0x56, 0x56, 0x9c, 0xc5, 0xed, 0xa5, 0x8a, 0x40, 0xbe, 0x2a, 0xaa, 0xce, 0xdd, 0x5f, 0x34, - 0x00, 0x0a, 0x2a, 0x78, 0x1b, 0x80, 0x87, 0x87, 0x41, 0x48, 0x58, 0x69, 0xfd, 0xd6, 0x44, 0x48, - 0xa8, 0x24, 0x87, 0x6b, 0xe0, 0xba, 0x4b, 0x18, 0xc3, 0x4e, 0xb6, 
0x1e, 0x6f, 0xaa, 0xa8, 0xaf, - 0x6f, 0xa5, 0x62, 0x94, 0xe9, 0xe1, 0x33, 0x70, 0x2d, 0x24, 0x98, 0xf9, 0x9e, 0x9c, 0xbb, 0xba, - 0xf9, 0x49, 0x12, 0xb7, 0xaf, 0x21, 0x29, 0x39, 0x8b, 0xdb, 0xeb, 0xd3, 0x3c, 0xe8, 0xf5, 0x5d, - 0x8e, 0x79, 0xc4, 0x52, 0x27, 0xa4, 0xe0, 0xcc, 0xed, 0xe3, 0xd3, 0xd6, 0xcc, 0xcb, 0xd3, 0xd6, - 0xcc, 0xc9, 0x69, 0x6b, 0xe6, 0xfb, 0xa4, 0xa5, 0x1d, 0x27, 0x2d, 0xed, 0x65, 0xd2, 0xd2, 0x4e, - 0x92, 0x96, 0xf6, 0x7b, 0xd2, 0xd2, 0x7e, 0xfc, 0xa3, 0x35, 0xf3, 0xe5, 0xda, 0xd4, 0xff, 0x7d, - 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x20, 0xc8, 0x63, 0x1d, 0x40, 0x0d, 0x00, 0x00, + // 1407 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0x45, + 0x18, 0xcf, 0xc6, 0x4e, 0x9a, 0x8c, 0xf3, 0xb0, 0x87, 0x56, 0x75, 0x23, 0x6a, 0x47, 0xab, 0x0a, + 0x35, 0x12, 0xec, 0x92, 0xb4, 0x50, 0x40, 0x48, 0x28, 0xdb, 0x17, 0x7d, 0xa4, 0x89, 0xa6, 0x28, + 0x91, 0x10, 0x95, 0x98, 0xec, 0x4e, 0xec, 0xa9, 0xbd, 0x0f, 0x76, 0xd6, 0xa1, 0x11, 0x48, 0x54, + 0xe2, 0x02, 0x37, 0x0e, 0x5c, 0xf8, 0x5f, 0xb8, 0x70, 0xeb, 0xb1, 0xc7, 0x72, 0xc0, 0x22, 0xe6, + 0xc2, 0x5f, 0x00, 0x52, 0x2e, 0xa0, 0x99, 0x9d, 0x7d, 0x3b, 0xc4, 0x2e, 0x81, 0x9b, 0xf7, 0x7b, + 0xfc, 0x7e, 0xf3, 0x7d, 0xf3, 0x7d, 0x33, 0xdf, 0x18, 0xa0, 0xce, 0x3b, 0x4c, 0xa3, 0xae, 0xde, + 0xe9, 0xed, 0x12, 0xdf, 0x21, 0x01, 0x61, 0xfa, 0x3e, 0x71, 0x2c, 0xd7, 0xd7, 0xa5, 0x02, 0x7b, + 0x54, 0xc7, 0x96, 0x4d, 0x19, 0xa3, 0xae, 0xe3, 0x93, 0x16, 0x65, 0x81, 0x8f, 0x03, 0xea, 0x3a, + 0xfa, 0xfe, 0x2a, 0xee, 0x7a, 0x6d, 0xbc, 0xaa, 0xb7, 0x88, 0x43, 0x7c, 0x1c, 0x10, 0x4b, 0xf3, + 0x7c, 0x37, 0x70, 0xe1, 0x4a, 0xe8, 0xaa, 0x61, 0x8f, 0x6a, 0x43, 0x5d, 0xb5, 0xc8, 0x75, 0xe9, + 0x8d, 0x16, 0x0d, 0xda, 0xbd, 0x5d, 0xcd, 0x74, 0x6d, 0xbd, 0xe5, 0xb6, 0x5c, 0x5d, 0x20, 0xec, + 0xf6, 0xf6, 0xc4, 0x97, 0xf8, 0x10, 0xbf, 0x42, 0xe4, 0xa5, 0x2b, 0x23, 0x2c, 0x2a, 0xbf, 0x9c, + 0xa5, 0xab, 0x89, 0x93, 0x8d, 0xcd, 0x36, 0x75, 0x88, 0x7f, 0xa0, 0x7b, 0x9d, 0x16, 0x17, 0x30, + 0xdd, 0x26, 0x01, 0x1e, 0xe6, 0xa5, 0x1f, 0xe7, 0xe5, 0xf7, 0x9c, 0x80, 0xda, 0xa4, 0xe0, 0xf0, + 0xf6, 0x49, 0x0e, 0xcc, 0x6c, 0x13, 0x1b, 0xe7, 0xfd, 0x54, 0x06, 0x16, 0xd7, 0x7b, 0x16, 0x0d, + 0xd6, 0x1d, 0xc7, 0x0d, 0x44, 0x10, 0xf0, 0x22, 0x28, 0x75, 0xc8, 0x41, 0x5d, 0x59, 0x56, 0x2e, + 0xcf, 0x1a, 0x95, 0x67, 0xfd, 0xe6, 0xc4, 0xa0, 0xdf, 0x2c, 0xdd, 0x23, 0x07, 0x88, 0xcb, 0xe1, + 0x3a, 0x58, 0xdc, 0xc7, 0xdd, 0x1e, 0xb9, 0xf9, 0xc4, 0xf3, 0x89, 0x48, 0x41, 0x7d, 0x52, 0x98, + 0x9e, 0x97, 0xa6, 0x8b, 0xdb, 0x59, 0x35, 0xca, 0xdb, 0xab, 0x5d, 0x50, 0x4b, 0xbe, 0x76, 0xb0, + 0xef, 0x50, 0xa7, 0x05, 0x5f, 0x07, 0x33, 0x7b, 0x94, 0x74, 0x2d, 0x44, 0xf6, 0x24, 0x60, 0x55, + 0x02, 0xce, 0xdc, 0x92, 0x72, 0x14, 0x5b, 0xc0, 0x15, 0x70, 0xe6, 0xf3, 0xd0, 0xb1, 0x5e, 0x12, + 0xc6, 0x8b, 0xd2, 0xf8, 0x8c, 0xc4, 0x43, 0x91, 0x5e, 0xdd, 0x03, 0x0b, 0x1b, 0x38, 0x30, 0xdb, + 0xd7, 0x5d, 0xc7, 0xa2, 0x22, 0xc2, 0x65, 0x50, 0x76, 0xb0, 0x4d, 0x64, 0x88, 0x73, 0xd2, 0xb3, + 0xfc, 0x00, 0xdb, 0x04, 0x09, 0x0d, 0x5c, 0x03, 0x80, 0xe4, 0xe3, 0x83, 0xd2, 0x0e, 0xa4, 0x42, + 0x4b, 0x59, 0xa9, 0x3f, 0x97, 0x25, 0x11, 0x22, 0xcc, 0xed, 0xf9, 0x26, 0x61, 0xf0, 0x09, 0xa8, + 0x71, 0x38, 0xe6, 0x61, 0x93, 0x3c, 0x24, 0x5d, 0x62, 0x06, 0xae, 0x2f, 0x58, 0x2b, 0x6b, 0x57, + 0xb4, 0xa4, 0x4e, 0xe3, 0x1d, 0xd3, 0xbc, 0x4e, 0x8b, 0x0b, 0x98, 0xc6, 0x0b, 0x43, 0xdb, 0x5f, + 0xd5, 0xee, 0xe3, 0x5d, 0xd2, 0x8d, 0x5c, 0x8d, 0x73, 0x83, 0x7e, 0xb3, 0xf6, 0x20, 0x8f, 0x88, + 0x8a, 0x24, 0xd0, 0x05, 0x0b, 0xee, 0xee, 0x63, 0x62, 
0x06, 0x31, 0xed, 0xe4, 0xcb, 0xd3, 0xc2, + 0x41, 0xbf, 0xb9, 0xb0, 0x99, 0x81, 0x43, 0x39, 0x78, 0xf8, 0x15, 0x98, 0xf7, 0x65, 0xdc, 0xa8, + 0xd7, 0x25, 0xac, 0x5e, 0x5a, 0x2e, 0x5d, 0xae, 0xac, 0x19, 0xda, 0xc8, 0xed, 0xa8, 0xf1, 0xc0, + 0x2c, 0xee, 0xbc, 0x43, 0x83, 0xf6, 0xa6, 0x47, 0x42, 0x3d, 0x33, 0xce, 0xc9, 0xc4, 0xcf, 0xa3, + 0x34, 0x01, 0xca, 0xf2, 0xc1, 0xef, 0x15, 0x70, 0x96, 0x3c, 0x31, 0xbb, 0x3d, 0x8b, 0x64, 0xec, + 0xea, 0xe5, 0x53, 0x5b, 0xc8, 0xab, 0x72, 0x21, 0x67, 0x6f, 0x0e, 0xe1, 0x41, 0x43, 0xd9, 0xe1, + 0x0d, 0x50, 0xb1, 0x79, 0x51, 0x6c, 0xb9, 0x5d, 0x6a, 0x1e, 0xd4, 0xcf, 0x88, 0x52, 0x52, 0x07, + 0xfd, 0x66, 0x65, 0x23, 0x11, 0x1f, 0xf5, 0x9b, 0x8b, 0xa9, 0xcf, 0x8f, 0x0e, 0x3c, 0x82, 0xd2, + 0x6e, 0xea, 0x0b, 0x05, 0x9c, 0x3f, 0x66, 0x55, 0xf0, 0x5a, 0x92, 0x79, 0x51, 0x1a, 0x75, 0x65, + 0xb9, 0x74, 0x79, 0xd6, 0xa8, 0xa5, 0x33, 0x26, 0x14, 0x28, 0x6b, 0x07, 0xbf, 0x56, 0x00, 0xf4, + 0x0b, 0x78, 0xb2, 0x50, 0xae, 0x8d, 0x92, 0x2f, 0x6d, 0x48, 0x92, 0x96, 0x64, 0x92, 0x60, 0x51, + 0x87, 0x86, 0xd0, 0xa9, 0x18, 0xcc, 0x6e, 0x61, 0x1f, 0xdb, 0xf7, 0xa8, 0x63, 0xf1, 0xbe, 0xc3, + 0x1e, 0xdd, 0x26, 0xbe, 0xe8, 0x3b, 0x25, 0xdb, 0x77, 0xeb, 0x5b, 0x77, 0xa4, 0x06, 0xa5, 0xac, + 0x78, 0x37, 0x77, 0xa8, 0x63, 0xc9, 0x2e, 0x8d, 0xbb, 0x99, 0xe3, 0x21, 0xa1, 0x51, 0x1f, 0x81, + 0x19, 0x41, 0xc1, 0x0f, 0x8e, 0x93, 0x7b, 0x5f, 0x07, 0xb3, 0x71, 0x3f, 0x49, 0xd0, 0x9a, 0x34, + 0x9b, 0x8d, 0x7b, 0x0f, 0x25, 0x36, 0xea, 0x0f, 0x0a, 0x98, 0xe3, 0x5b, 0x76, 0xbd, 0x4d, 0xcc, + 0x0e, 0x3f, 0xca, 0xbe, 0x51, 0x00, 0x24, 0xf9, 0x03, 0x2e, 0xdc, 0x97, 0xca, 0xda, 0xfb, 0x63, + 0x14, 0x62, 0xe1, 0x94, 0x4c, 0xb2, 0x5b, 0x50, 0x31, 0x34, 0x84, 0x53, 0xfd, 0x65, 0x12, 0x5c, + 0xd8, 0xc6, 0x5d, 0x6a, 0xe1, 0x80, 0x3a, 0xad, 0xf5, 0x88, 0x2e, 0x2c, 0x2b, 0xf8, 0x29, 0x98, + 0xe1, 0x1d, 0x6f, 0xe1, 0x00, 0xcb, 0x63, 0xe9, 0xcd, 0xd1, 0xce, 0x87, 0xf0, 0x30, 0xd8, 0x20, + 0x01, 0x4e, 0xb6, 0x27, 0x91, 0xa1, 0x18, 0x15, 0x3e, 0x06, 0x65, 0xe6, 0x11, 0x53, 0x16, 0xd5, + 0x87, 0x63, 0xc4, 0x7e, 0xec, 0xaa, 0x1f, 0x7a, 0xc4, 0x4c, 0x36, 0x8e, 0x7f, 0x21, 0xc1, 0x01, + 0x7d, 0x30, 0xcd, 0x02, 0x1c, 0xf4, 0x98, 0xb8, 0x12, 0x2a, 0x6b, 0x77, 0x4f, 0x85, 0x4d, 0x20, + 0x1a, 0x0b, 0x92, 0x6f, 0x3a, 0xfc, 0x46, 0x92, 0x49, 0xfd, 0x53, 0x01, 0xcb, 0xc7, 0xfa, 0x1a, + 0xd4, 0xb1, 0x78, 0x3d, 0xfc, 0xf7, 0x69, 0xfe, 0x2c, 0x93, 0xe6, 0xcd, 0xd3, 0x08, 0x5c, 0x2e, + 0xfe, 0xb8, 0x6c, 0xab, 0x7f, 0x28, 0xe0, 0xd2, 0x49, 0xce, 0xf7, 0x29, 0x0b, 0xe0, 0x27, 0x85, + 0xe8, 0xb5, 0x11, 0x2f, 0x21, 0xca, 0xc2, 0xd8, 0xe3, 0x41, 0x20, 0x92, 0xa4, 0x22, 0xf7, 0xc0, + 0x14, 0x0d, 0x88, 0xcd, 0x8f, 0x2d, 0xde, 0x5d, 0xf7, 0x4e, 0x31, 0x74, 0x63, 0x5e, 0xf2, 0x4e, + 0xdd, 0xe1, 0x0c, 0x28, 0x24, 0x52, 0xbf, 0x2d, 0x9d, 0x1c, 0x38, 0xcf, 0x13, 0x3f, 0xcc, 0x3c, + 0x21, 0x7c, 0x90, 0x1c, 0x38, 0xf1, 0x36, 0x6e, 0xc5, 0x1a, 0x94, 0xb2, 0x82, 0x8f, 0xc0, 0x8c, + 0x27, 0x8f, 0xaa, 0x21, 0x37, 0xf6, 0x49, 0x11, 0x45, 0xa7, 0x9c, 0x31, 0xc7, 0xb3, 0x15, 0x7d, + 0xa1, 0x18, 0x12, 0xf6, 0xc0, 0x82, 0x9d, 0x19, 0x51, 0x64, 0xab, 0xbc, 0x3b, 0x06, 0x49, 0x76, + 0xc6, 0x09, 0x87, 0x83, 0xac, 0x0c, 0xe5, 0x48, 0xe0, 0x0e, 0xa8, 0xed, 0xcb, 0x8c, 0xb9, 0xce, + 0xba, 0x19, 0xde, 0x33, 0x65, 0x71, 0x4d, 0xad, 0xf0, 0x91, 0x66, 0x3b, 0xaf, 0x3c, 0xea, 0x37, + 0xab, 0x79, 0x21, 0x2a, 0x62, 0xa8, 0xbf, 0x2b, 0xe0, 0xe2, 0xb1, 0x7b, 0xf1, 0x3f, 0x54, 0x1f, + 0xcd, 0x56, 0xdf, 0x8d, 0x53, 0xa9, 0xbe, 0xe1, 0x65, 0xf7, 0xe3, 0xd4, 0x3f, 0x84, 0x2a, 0xea, + 0x0d, 0x83, 0x59, 0x2f, 0xba, 0x49, 0x65, 0xac, 0x57, 0xc7, 0x2d, 0x1e, 0xee, 
0x6b, 0xcc, 0xf3, + 0xab, 0x2e, 0xfe, 0x44, 0x09, 0x2a, 0xfc, 0x02, 0x54, 0x6d, 0x39, 0x4b, 0x73, 0x00, 0xea, 0x04, + 0xd1, 0xbc, 0xf0, 0x2f, 0x2a, 0xe8, 0xec, 0xa0, 0xdf, 0xac, 0x6e, 0xe4, 0x60, 0x51, 0x81, 0x08, + 0x76, 0x41, 0x25, 0xa9, 0x80, 0x68, 0xc0, 0x7c, 0xeb, 0x25, 0x52, 0xee, 0x3a, 0xc6, 0x2b, 0x32, + 0xc7, 0x95, 0x44, 0xc6, 0x50, 0x1a, 0x1e, 0xde, 0x07, 0xf3, 0x7b, 0x98, 0x76, 0x7b, 0x3e, 0x91, + 0xa3, 0x5b, 0x59, 0x34, 0xf0, 0x6b, 0x7c, 0xac, 0xba, 0x95, 0x56, 0x1c, 0xf5, 0x9b, 0xb5, 0x8c, + 0x40, 0x8c, 0x6f, 0x59, 0x67, 0xf8, 0x54, 0x01, 0x55, 0x9c, 0x7d, 0x68, 0xb1, 0xfa, 0x94, 0x88, + 0xe0, 0xbd, 0x31, 0x22, 0xc8, 0xbd, 0xd5, 0x8c, 0xba, 0x0c, 0xa3, 0x9a, 0x53, 0x30, 0x54, 0x60, + 0x83, 0x5f, 0x82, 0x45, 0x3b, 0xf3, 0x0e, 0x62, 0xf5, 0x69, 0xb1, 0x80, 0xb1, 0xb7, 0x2e, 0x46, + 0x48, 0xde, 0x7c, 0x59, 0x39, 0x43, 0x79, 0x2a, 0xf5, 0xa7, 0x49, 0xd0, 0x3c, 0xe1, 0x92, 0x85, + 0x77, 0x01, 0x74, 0x77, 0x19, 0xf1, 0xf7, 0x89, 0x75, 0x3b, 0x7c, 0xa7, 0x46, 0x53, 0x60, 0x29, + 0x19, 0x7c, 0x36, 0x0b, 0x16, 0x68, 0x88, 0x17, 0xb4, 0xc1, 0x5c, 0x90, 0x9a, 0xc9, 0xc6, 0x99, + 0x6a, 0x65, 0xa8, 0xe9, 0x91, 0xce, 0xa8, 0x0e, 0xfa, 0xcd, 0xcc, 0x90, 0x87, 0x32, 0xf0, 0xd0, + 0x04, 0xc0, 0x4c, 0xf2, 0x1a, 0x96, 0xa6, 0x3e, 0xda, 0x41, 0x93, 0x64, 0x33, 0xbe, 0x1c, 0x52, + 0x89, 0x4c, 0xc1, 0xaa, 0x7f, 0x29, 0x00, 0x24, 0xf5, 0x0a, 0x2f, 0x81, 0xd4, 0x53, 0x54, 0xde, + 0x2f, 0x65, 0x0e, 0x81, 0x52, 0x72, 0xfe, 0x52, 0xb6, 0x09, 0x63, 0xb8, 0x15, 0x0d, 0xb3, 0xf1, + 0x4b, 0x79, 0x23, 0x14, 0xa3, 0x48, 0x0f, 0x77, 0xc0, 0xb4, 0x4f, 0x30, 0x73, 0x1d, 0xf9, 0xa6, + 0xfe, 0x80, 0x0f, 0x3c, 0x48, 0x48, 0x8e, 0xfa, 0xcd, 0xd5, 0x51, 0xfe, 0xc9, 0xd0, 0xe4, 0x7c, + 0x24, 0x9c, 0x90, 0x84, 0x83, 0xb7, 0x41, 0x4d, 0x72, 0xa4, 0x16, 0x1c, 0xf6, 0xd3, 0x05, 0xb9, + 0x9a, 0xda, 0x46, 0xde, 0x00, 0x15, 0x7d, 0x8c, 0xcd, 0x67, 0x87, 0x8d, 0x89, 0xe7, 0x87, 0x8d, + 0x89, 0x17, 0x87, 0x8d, 0x89, 0xa7, 0x83, 0x86, 0xf2, 0x6c, 0xd0, 0x50, 0x9e, 0x0f, 0x1a, 0xca, + 0x8b, 0x41, 0x43, 0xf9, 0x75, 0xd0, 0x50, 0xbe, 0xfb, 0xad, 0x31, 0xf1, 0xf1, 0xca, 0xc8, 0xff, + 0x1e, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x08, 0xaf, 0xaa, 0x52, 0x82, 0x12, 0x00, 0x00, +} + +func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ValueExpression) + copy(dAtA[i:], m.ValueExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Warning))) + i-- + dAtA[i] = 0x1a + i -= len(m.FieldRef) + copy(dAtA[i:], m.FieldRef) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MatchResources) Marshal() (dAtA []byte, err error) { @@ -631,6 +897,43 @@ func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -651,6 +954,16 @@ func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -784,6 +1097,15 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) _ = i var l int _ = l + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } if m.MatchResources != nil { { size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) @@ -883,6 +1205,34 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, 
err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if m.FailurePolicy != nil { i -= len(*m.FailurePolicy) copy(dAtA[i:], *m.FailurePolicy) @@ -931,7 +1281,7 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *Validation) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -941,27 +1291,84 @@ func (m *Validation) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Validation) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Reason != nil { - i -= len(*m.Reason) - copy(dAtA[i:], *m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- dAtA[i] = 0x12 i -= len(m.Expression) copy(dAtA[i:], m.Expression) @@ -982,6 +1389,45 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MatchResources) Size() (n int) { if m == nil { return 0 @@ -1058,6 +1504,21 @@ func (m *ParamRef) Size() (n int) { return n } +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ValidatingAdmissionPolicy) Size() (n int) { if m == nil { return 0 @@ -1068,6 +1529,8 @@ func (m *ValidatingAdmissionPolicy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1117,6 +1580,12 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { l = m.MatchResources.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1161,6 +1630,38 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) { l = len(*m.FailurePolicy) n += 1 + l + sovGenerated(uint64(l)) } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1178,6 +1679,8 @@ func (m *Validation) Size() (n int) { l = len(*m.Reason) n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1187,6 +1690,39 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MatchResources) String() string { if this == nil { return "nil" @@ -1244,6 +1780,21 @@ func (this *ParamRef) String() string { }, "") return s } +func (this *TypeChecking) 
String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} func (this *ValidatingAdmissionPolicy) String() string { if this == nil { return "nil" @@ -1251,6 +1802,7 @@ func (this *ValidatingAdmissionPolicy) String() string { s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1290,6 +1842,7 @@ func (this *ValidatingAdmissionPolicyBindingSpec) String() string { `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, `}`, }, "") return s @@ -1314,39 +1867,411 @@ func (this *ValidatingAdmissionPolicySpec) String() string { if this == nil { return "nil" } - repeatedStringForValidations := "[]Validation{" - for _, f := range this.Validations { - repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range 
this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - repeatedStringForValidations += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, - `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, - `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, - `Validations:` + repeatedStringForValidations + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `}`, - }, "") - return s + return nil } -func (this *Validation) String() string { - if this == nil { - return "nil" +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&Validation{`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + valueToStringGenerated(this.Reason) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } func (m *MatchResources) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -1914,6 +2839,90 @@ func (m *ParamRef) Unmarshal(dAtA []byte) error { } return nil } +func (m *TypeChecking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{}) + if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1945,7 +2954,40 @@ func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1972,13 +3014,13 @@ func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2005,7 +3047,7 @@ func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2396,6 +3438,38 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2702,6 
+3776,213 @@ func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { s := FailurePolicyType(dAtA[iNdEx:postIndex]) m.FailurePolicy = &s iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{}) + if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TypeChecking == nil { + m.TypeChecking = &TypeChecking{} + } + if err := m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2849,6 +4130,38 @@ func (m *Validation) Unmarshal(dAtA []byte) error { s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex]) m.Reason = &s iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index fe8236cd36..c718c5464d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -29,6 +29,84 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1alpha1"; +// AuditAnnotation describes how to produce an audit annotation for an API request. +message AuditAnnotation { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. 
+ // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + optional string key = 1; + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + optional string valueExpression = 2; +} + +// ExpressionWarning is a warning information that targets a specific expression. +message ExpressionWarning { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + optional string fieldRef = 2; + + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + optional string warning = 3; +} + +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MatchResources decides whether to run the admission control policy on an object based // on whether it meets the match criteria. 
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) @@ -161,6 +239,15 @@ message ParamRef { optional string namespace = 2; } +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +message TypeChecking { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + repeated ExpressionWarning expressionWarnings = 1; +} + // ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. @@ -169,6 +256,13 @@ message ValidatingAdmissionPolicy { // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; + + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + optional ValidatingAdmissionPolicyStatus status = 3; } // ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. @@ -213,6 +307,48 @@ message ValidatingAdmissionPolicyBindingSpec { // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. // +optional optional MatchResources matchResources = 3; + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. 
The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + repeated string validationActions = 4; } // ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. @@ -243,30 +379,91 @@ message ValidatingAdmissionPolicySpec { optional MatchResources matchConstraints = 2; // Validations contain CEL expressions which is used to apply the validation. - // A minimum of one validation is required for a policy definition. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. // +listType=atomic - // Required. + // +optional repeated Validation validations = 3; - // FailurePolicy defines how to handle failures for the admission policy. - // Failures can occur from invalid or mis-configured policy definitions or bindings. + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // // A policy is invalid if spec.paramKind refers to a non-existent Kind. // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // // Allowed values are Ignore or Fail. Defaults to Fail. // +optional optional string failurePolicy = 4; + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + repeated AuditAnnotation auditAnnotations = 5; + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. 
+ // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyStatus { + // The generation observed by the controller. + // +optional + optional int64 observedGeneration = 1; + + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + optional TypeChecking typeChecking = 2; + + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // Validation specifies the CEL expression which is used to apply the validation. message Validation { // Expression represents the expression which will be evaluated by CEL. // ref: https://github.com/google/cel-spec - // CEL expressions have access to the contents of the Admission request/response, organized into CEL variables as well as some other useful variables: + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: // - // 'object' - The object from the incoming request. The value is null for DELETE requests. - // 'oldObject' - The existing object. The value is null for CREATE requests. - // 'request' - Attributes of the admission request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - // 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. // // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the // object. No other metadata properties are accessible. @@ -313,5 +510,18 @@ message Validation { // If not set, StatusReasonInvalid is used in the response to the client. // +optional optional string reason = 3; + + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. 
+ // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + optional string messageExpression = 4; } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index b64bc628f7..2bbb55a47d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -74,6 +74,49 @@ type ValidatingAdmissionPolicy struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the ValidatingAdmissionPolicy. Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyStatus struct { + // The generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"` + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +type TypeChecking struct { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"` +} + +// ExpressionWarning is a warning information that targets a specific expression. +type ExpressionWarning struct { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"` + // The content of type checking information in a human-readable form. 
+ // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -107,20 +150,61 @@ type ValidatingAdmissionPolicySpec struct { MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` // Validations contain CEL expressions which is used to apply the validation. - // A minimum of one validation is required for a policy definition. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. // +listType=atomic - // Required. - Validations []Validation `json:"validations" protobuf:"bytes,3,rep,name=validations"` + // +optional + Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"` - // FailurePolicy defines how to handle failures for the admission policy. - // Failures can occur from invalid or mis-configured policy definitions or bindings. + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // // A policy is invalid if spec.paramKind refers to a non-existent Kind. // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // // Allowed values are Ignore or Fail. Defaults to Fail. // +optional FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"` + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` } +type MatchCondition v1.MatchCondition + // ParamKind is a tuple of Group Kind and Version. // +structType=atomic type ParamKind struct { @@ -138,12 +222,16 @@ type ParamKind struct { type Validation struct { // Expression represents the expression which will be evaluated by CEL. // ref: https://github.com/google/cel-spec - // CEL expressions have access to the contents of the Admission request/response, organized into CEL variables as well as some other useful variables: + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: // - //'object' - The object from the incoming request. The value is null for DELETE requests. - //'oldObject' - The existing object. The value is null for CREATE requests. - //'request' - Attributes of the admission request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - //'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. // // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the // object. No other metadata properties are accessible. @@ -188,6 +276,55 @@ type Validation struct { // If not set, StatusReasonInvalid is used in the response to the client. // +optional Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. 
+ // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"` +} + +// AuditAnnotation describes how to produce an audit annotation for an API request. +type AuditAnnotation struct { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"` } // +genclient @@ -240,6 +377,48 @@ type ValidatingAdmissionPolicyBindingSpec struct { // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. // +optional MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. 
+ // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"` } // ParamRef references a parameter resource @@ -344,6 +523,24 @@ type MatchResources struct { MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"` } +// ValidationAction specifies a policy enforcement action. +// +enum +type ValidationAction string + +const ( + // Deny specifies that a validation failure results in a denied request. + Deny ValidationAction = "Deny" + // Warn specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + Warn ValidationAction = "Warn" + // Audit specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failure. + Audit ValidationAction = "Audit" +) + // NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. // +structType=atomic type NamedRuleWithOperations struct { diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go index a670bb206d..b3cac1821b 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -24,9 +24,29 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
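Editor's note (not part of the diff): the v1alpha1 hunks above vendor the new ValidatingAdmissionPolicy surface — an optional `validations` list with `messageExpression`, `auditAnnotations`, `matchConditions`, a policy `status`, and `validationActions` on the binding. The following is a minimal, hypothetical sketch of how a consumer of these vendored types could populate them; the CEL expressions, names, and the assumption that a `paramKind`/param resource supplies `params.maxReplicas` are placeholders, not anything defined in this change.

```go
package main

import (
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

func main() {
	failurePolicy := admissionv1alpha1.Fail

	// Policy spec exercising the newly vendored fields. The CEL expressions are
	// illustrative placeholders; 'params' would only resolve if spec.paramKind
	// were set and the binding referenced a matching param resource.
	policySpec := admissionv1alpha1.ValidatingAdmissionPolicySpec{
		FailurePolicy: &failurePolicy,
		Validations: []admissionv1alpha1.Validation{{
			Expression:        "object.spec.replicas <= params.maxReplicas",
			Message:           "replica count exceeds the configured maximum",
			MessageExpression: `"replicas must be <= " + string(params.maxReplicas)`,
		}},
		AuditAnnotations: []admissionv1alpha1.AuditAnnotation{{
			Key:             "observed-replicas",
			ValueExpression: "string(object.spec.replicas)",
		}},
		MatchConditions: []admissionv1alpha1.MatchCondition{{
			Name:       "exclude-kube-system",
			Expression: `request.namespace != "kube-system"`,
		}},
	}

	// Binding spec showing validationActions: a failed validation is both
	// denied and recorded as an annotation on the request's audit event.
	bindingSpec := admissionv1alpha1.ValidatingAdmissionPolicyBindingSpec{
		PolicyName: "replica-limit.example.com",
		ValidationActions: []admissionv1alpha1.ValidationAction{
			admissionv1alpha1.Deny,
			admissionv1alpha1.Audit,
		},
	}

	fmt.Printf("validations=%d actions=%v\n", len(policySpec.Validations), bindingSpec.ValidationActions)
}
```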
+var map_AuditAnnotation = map[string]string{ + "": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", +} + +func (AuditAnnotation) SwaggerDoc() map[string]string { + return map_AuditAnnotation +} + +var map_ExpressionWarning = map[string]string{ + "": "ExpressionWarning is a warning information that targets a specific expression.", + "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "warning": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", +} + +func (ExpressionWarning) SwaggerDoc() map[string]string { + return map_ExpressionWarning +} + var map_MatchResources = map[string]string{ "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", @@ -69,10 +89,20 @@ func (ParamRef) SwaggerDoc() map[string]string { return map_ParamRef } +var map_TypeChecking = map[string]string{ + "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "expressionWarnings": "The type checking warnings for each expression.", +} + +func (TypeChecking) SwaggerDoc() map[string]string { + return map_TypeChecking +} + var map_ValidatingAdmissionPolicy = map[string]string{ "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", } func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string { @@ -100,10 +130,11 @@ func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { } var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{ - "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", - "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", - "paramRef": "ParamRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.", - "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. 
Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "ParamRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.", + "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", } func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { @@ -124,19 +155,33 @@ var map_ValidatingAdmissionPolicySpec = map[string]string{ "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", - "validations": "Validations contain CEL expressions which is used to apply the validation. A minimum of one validation is required for a policy definition. Required.", - "failurePolicy": "FailurePolicy defines how to handle failures for the admission policy. Failures can occur from invalid or mis-configured policy definitions or bindings. A policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource. Allowed values are Ignore or Fail. Defaults to Fail.", + "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. 
A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", } func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string { return map_ValidatingAdmissionPolicySpec } +var map_ValidatingAdmissionPolicyStatus = map[string]string{ + "": "ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy.", + "observedGeneration": "The generation observed by the controller.", + "typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "conditions": "The conditions represent the latest available observations of a policy's current state.", +} + +func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyStatus +} + var map_Validation = map[string]string{ - "": "Validation specifies the CEL expression which is used to apply the validation.", - "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the Admission request/response, organized into CEL variables as well as some other useful variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. 
The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", - "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", - "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "": "Validation specifies the CEL expression which is used to apply the validation.", + "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. 
Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. 
Example: \"object.x must be less than max (\"+string(params.max)+\")\"", } func (Validation) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 4f29ac7a94..8e4abfd087 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,54 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation. +func (in *AuditAnnotation) DeepCopy() *AuditAnnotation { + if in == nil { + return nil + } + out := new(AuditAnnotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning. +func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { + if in == nil { + return nil + } + out := new(ExpressionWarning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MatchResources) DeepCopyInto(out *MatchResources) { *out = *in @@ -125,12 +173,34 @@ func (in *ParamRef) DeepCopy() *ParamRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeChecking) DeepCopyInto(out *TypeChecking) { + *out = *in + if in.ExpressionWarnings != nil { + in, out := &in.ExpressionWarnings, &out.ExpressionWarnings + *out = make([]ExpressionWarning, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking. +func (in *TypeChecking) DeepCopy() *TypeChecking { + if in == nil { + return nil + } + out := new(TypeChecking) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } @@ -225,6 +295,11 @@ func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmi *out = new(MatchResources) (*in).DeepCopyInto(*out) } + if in.ValidationActions != nil { + in, out := &in.ValidationActions, &out.ValidationActions + *out = make([]ValidationAction, len(*in)) + copy(*out, *in) + } return } @@ -296,6 +371,16 @@ func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPo *out = new(FailurePolicyType) **out = **in } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make([]AuditAnnotation, len(*in)) + copy(*out, *in) + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -309,6 +394,34 @@ func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySp return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) { + *out = *in + if in.TypeChecking != nil { + in, out := &in.TypeChecking, &out.TypeChecking + *out = new(TypeChecking) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus. +func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Validation) DeepCopyInto(out *Validation) { *out = *in diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 56a9f10e5c..8fb354c319 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -45,10 +45,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{0} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{0} + return fileDescriptor_abeea74cbc46f55a, []int{1} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +104,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{1} + return fileDescriptor_abeea74cbc46f55a, []int{2} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +132,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{2} + return fileDescriptor_abeea74cbc46f55a, []int{3} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +160,7 @@ var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{3} + return fileDescriptor_abeea74cbc46f55a, []int{4} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +188,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{4} + return fileDescriptor_abeea74cbc46f55a, []int{5} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +216,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{5} + return fileDescriptor_abeea74cbc46f55a, 
[]int{6} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +244,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{6} + return fileDescriptor_abeea74cbc46f55a, []int{7} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +272,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{7} + return fileDescriptor_abeea74cbc46f55a, []int{8} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,6 +298,7 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") @@ -285,68 +314,106 @@ func init() { } var fileDescriptor_abeea74cbc46f55a = []byte{ - // 974 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x49, 0x6f, 0xdb, 0x46, - 0x14, 0x36, 0x2d, 0x29, 0x92, 0x46, 0xb2, 0x13, 0x4d, 0x97, 0xb0, 0x6e, 0x40, 0x0a, 0x3a, 0x14, - 0xba, 0x94, 0x4c, 0x9c, 0xa2, 0x4b, 0x8a, 0x1e, 0x42, 0xb7, 0x41, 0x0b, 0xd8, 0x4e, 0x3a, 0xce, - 0x02, 0xb4, 0x29, 0x90, 0x11, 0xf5, 0x24, 0x4d, 0x45, 0x72, 0x04, 0xce, 0x50, 0xa9, 0x6f, 0xfd, - 0x09, 0xfd, 0x0b, 0xfd, 0x21, 0xbd, 0xf5, 0xe0, 0x63, 0x8e, 0xb9, 0x94, 0xa8, 0xd9, 0x5e, 0x7b, - 0xe8, 0xd5, 0xa7, 0x82, 0x8b, 0x76, 0x39, 0x21, 0x5c, 0x20, 0x27, 0xdf, 0x34, 0xdf, 0xe3, 0xf7, - 0xbd, 0x79, 0x6f, 0xde, 0x02, 0xa1, 0x6f, 0x87, 0x9f, 0x0a, 0x83, 0x71, 0x73, 0x18, 0x74, 0xc0, - 0xf7, 0x40, 0x82, 0x30, 0xc7, 0xe0, 0x75, 0xb9, 0x6f, 0x66, 0x06, 0x3a, 0x62, 0x26, 0xed, 0xba, - 0x4c, 0x08, 0xc6, 0x3d, 0x1f, 0xfa, 0x4c, 0x48, 0x9f, 0x4a, 0xc6, 0x3d, 0x73, 0x7c, 0xab, 0x03, - 0x92, 0xde, 0x32, 0xfb, 0xe0, 0x81, 0x4f, 0x25, 0x74, 0x8d, 0x91, 0xcf, 0x25, 0xc7, 0xed, 0x94, - 0x69, 0xd0, 0x11, 0x33, 0xd6, 0x32, 0x8d, 0x8c, 0xb9, 0xf3, 0x61, 0x9f, 0xc9, 0x41, 0xd0, 0x31, - 0x6c, 0xee, 0x9a, 0x7d, 0xde, 0xe7, 0x66, 0x22, 0xd0, 0x09, 0x7a, 0xc9, 0x29, 0x39, 0x24, 0xbf, - 0x52, 0xe1, 0x9d, 0xdb, 0x39, 0xae, 0xb4, 0x7c, 0x9b, 0x9d, 0x8f, 0x66, 0x24, 0x97, 0xda, 0x03, - 0xe6, 0x81, 0x7f, 0x6c, 0x8e, 0x86, 0xfd, 0x18, 0x10, 0xa6, 0x0b, 0x92, 0xae, 0x63, 0x99, 0xe7, - 0xb1, 0xfc, 0xc0, 0x93, 0xcc, 0x85, 0x15, 0xc2, 0xc7, 0xaf, 0x23, 0x08, 0x7b, 0x00, 0x2e, 0x5d, - 0xe6, 0xb5, 0x7e, 0x2f, 0xa3, 0xab, 0x07, 0x81, 0xa4, 0x92, 0x79, 0xfd, 0x27, 0xd0, 0x19, 0x70, - 0x3e, 0xc4, 0x4d, 0x54, 0xf4, 0xa8, 0x0b, 0xaa, 0xd2, 0x54, 0xda, 0x55, 0xab, 0x7e, 0x12, 0xea, - 0x1b, 0x51, 0xa8, 
0x17, 0x0f, 0xa9, 0x0b, 0x24, 0xb1, 0xe0, 0xe7, 0xa8, 0x6e, 0x3b, 0x0c, 0x3c, - 0xb9, 0xc7, 0xbd, 0x1e, 0xeb, 0xab, 0x9b, 0x4d, 0xa5, 0x5d, 0xdb, 0xfd, 0xc2, 0xc8, 0x9b, 0x79, - 0x23, 0x73, 0xb5, 0x37, 0x27, 0x62, 0xbd, 0x9d, 0x39, 0xaa, 0xcf, 0xa3, 0x64, 0xc1, 0x11, 0x7e, - 0x8a, 0x4a, 0x7e, 0xe0, 0x80, 0x50, 0x0b, 0xcd, 0x42, 0xbb, 0xb6, 0xfb, 0x49, 0x1e, 0x8f, 0x06, - 0x09, 0x1c, 0x78, 0xc2, 0xe4, 0xe0, 0xfe, 0x08, 0x52, 0x50, 0x58, 0x5b, 0x99, 0xaf, 0x52, 0x6c, - 0x13, 0x24, 0x15, 0xc5, 0xfb, 0x68, 0xab, 0x47, 0x99, 0x13, 0xf8, 0xf0, 0x80, 0x3b, 0xcc, 0x3e, - 0x56, 0x8b, 0x49, 0x06, 0x3e, 0x88, 0x42, 0x7d, 0xeb, 0xde, 0xbc, 0xe1, 0x2c, 0xd4, 0x1b, 0x0b, - 0xc0, 0xc3, 0xe3, 0x11, 0x90, 0x45, 0x32, 0xfe, 0x12, 0xd5, 0x5c, 0x2a, 0xed, 0x41, 0xa6, 0x55, - 0x4d, 0xb4, 0x5a, 0x51, 0xa8, 0xd7, 0x0e, 0x66, 0xf0, 0x59, 0xa8, 0x5f, 0x9d, 0x3b, 0x26, 0x3a, - 0xf3, 0x34, 0xfc, 0x13, 0x6a, 0xc4, 0x29, 0x17, 0x23, 0x6a, 0xc3, 0x11, 0x38, 0x60, 0x4b, 0xee, - 0xab, 0xa5, 0x24, 0xdf, 0xb7, 0xe7, 0xa2, 0x9f, 0x3e, 0xba, 0x31, 0x1a, 0xf6, 0x63, 0x40, 0x18, - 0x71, 0x6d, 0xc5, 0xe1, 0xef, 0xd3, 0x0e, 0x38, 0x13, 0xaa, 0xf5, 0x4e, 0x14, 0xea, 0x8d, 0xc3, - 0x65, 0x45, 0xb2, 0xea, 0x04, 0x73, 0xb4, 0xcd, 0x3b, 0x3f, 0x82, 0x2d, 0xa7, 0x6e, 0x6b, 0x17, - 0x77, 0x8b, 0xa3, 0x50, 0xdf, 0xbe, 0xbf, 0x20, 0x47, 0x96, 0xe4, 0xe3, 0x84, 0x09, 0xd6, 0x85, - 0xaf, 0x7a, 0x3d, 0xb0, 0xa5, 0x50, 0xaf, 0xcc, 0x12, 0x76, 0x34, 0x83, 0xe3, 0x84, 0xcd, 0x8e, - 0x7b, 0x0e, 0x15, 0x82, 0xcc, 0xd3, 0xf0, 0x1d, 0xb4, 0x1d, 0x17, 0x3c, 0x0f, 0xe4, 0x11, 0xd8, - 0xdc, 0xeb, 0x0a, 0xb5, 0xdc, 0x54, 0xda, 0xa5, 0xf4, 0x06, 0x0f, 0x17, 0x2c, 0x64, 0xe9, 0x4b, - 0xfc, 0x08, 0x5d, 0x9f, 0x56, 0x11, 0x81, 0x31, 0x83, 0xe7, 0x8f, 0xc1, 0x8f, 0x0f, 0x42, 0xad, - 0x34, 0x0b, 0xed, 0xaa, 0xf5, 0x7e, 0x14, 0xea, 0xd7, 0xef, 0xae, 0xff, 0x84, 0x9c, 0xc7, 0xc5, - 0xcf, 0x10, 0xf6, 0x81, 0x79, 0x63, 0x6e, 0x27, 0xe5, 0x97, 0x15, 0x04, 0x4a, 0xe2, 0xbb, 0x19, - 0x85, 0x3a, 0x26, 0x2b, 0xd6, 0xb3, 0x50, 0x7f, 0x77, 0x15, 0x4d, 0xca, 0x63, 0x8d, 0x56, 0xeb, - 0x0f, 0x05, 0xdd, 0x58, 0x6a, 0xe3, 0xb4, 0x63, 0x82, 0xb4, 0xe2, 0xf1, 0x33, 0x54, 0x89, 0x1f, - 0xa6, 0x4b, 0x25, 0x4d, 0xfa, 0xba, 0xb6, 0x7b, 0x33, 0xdf, 0x33, 0xa6, 0x6f, 0x76, 0x00, 0x92, - 0x5a, 0x38, 0x6b, 0x1a, 0x34, 0xc3, 0xc8, 0x54, 0x15, 0x7f, 0x8f, 0x2a, 0x99, 0x67, 0xa1, 0x6e, - 0x26, 0xdd, 0xf9, 0x59, 0xfe, 0x79, 0xb0, 0x74, 0x77, 0xab, 0x18, 0xbb, 0x22, 0x53, 0xc1, 0xd6, - 0x3f, 0x0a, 0x6a, 0xbe, 0x2a, 0xbe, 0x7d, 0x26, 0x24, 0x7e, 0xba, 0x12, 0xa3, 0x91, 0xb3, 0x54, - 0x99, 0x48, 0x23, 0xbc, 0x96, 0x45, 0x58, 0x99, 0x20, 0x73, 0xf1, 0x0d, 0x51, 0x89, 0x49, 0x70, - 0x27, 0xc1, 0xdd, 0xbb, 0x70, 0x70, 0x0b, 0x17, 0x9f, 0x4d, 0xa2, 0x6f, 0x62, 0x71, 0x92, 0xfa, - 0x68, 0xfd, 0xaa, 0xa0, 0x6b, 0x47, 0xe0, 0x8f, 0x99, 0x0d, 0x04, 0x7a, 0xe0, 0x83, 0x67, 0x03, - 0x36, 0x51, 0x75, 0xda, 0xa5, 0xd9, 0x70, 0x6e, 0x64, 0xec, 0xea, 0xb4, 0xa3, 0xc9, 0xec, 0x9b, - 0xe9, 0x20, 0xdf, 0x3c, 0x77, 0x90, 0xdf, 0x40, 0xc5, 0x11, 0x95, 0x03, 0xb5, 0x90, 0x7c, 0x51, - 0x89, 0xad, 0x0f, 0xa8, 0x1c, 0x90, 0x04, 0x4d, 0xac, 0xdc, 0x97, 0xc9, 0x18, 0x2c, 0x65, 0x56, - 0xee, 0x4b, 0x92, 0xa0, 0xad, 0xbf, 0xaf, 0xa0, 0xc6, 0x63, 0xea, 0xb0, 0xee, 0xe5, 0xf2, 0xb8, - 0x5c, 0x1e, 0xaf, 0x5f, 0x1e, 0xe8, 0x72, 0x79, 0x5c, 0x64, 0x79, 0xb4, 0x4e, 0x15, 0xa4, 0xad, - 0xb4, 0xd9, 0x9b, 0x1e, 0xee, 0x3f, 0xac, 0x0c, 0xf7, 0xcf, 0xf3, 0xf7, 0xeb, 0xca, 0xed, 0x57, - 0xc6, 0xfb, 0xbf, 0x0a, 0x6a, 0xbd, 0x3a, 0xc6, 0x37, 0x30, 0xe0, 0xdd, 0xc5, 0x01, 0xff, 0xf5, - 0xff, 0x08, 0x30, 0xcf, 0x88, 0xff, 0x4d, 
0x41, 0x6f, 0xad, 0x99, 0x64, 0xf8, 0x3d, 0x54, 0x08, - 0x7c, 0x27, 0x9b, 0xc8, 0xe5, 0x28, 0xd4, 0x0b, 0x8f, 0xc8, 0x3e, 0x89, 0x31, 0x4c, 0x51, 0x59, - 0xa4, 0x4b, 0x21, 0x0b, 0xff, 0x4e, 0xfe, 0x3b, 0x2e, 0x6f, 0x13, 0xab, 0x16, 0x85, 0x7a, 0x79, - 0x82, 0x4e, 0x74, 0x71, 0x1b, 0x55, 0x6c, 0x6a, 0x05, 0x5e, 0xd7, 0x49, 0xd7, 0x46, 0xdd, 0xaa, - 0xc7, 0xe9, 0xda, 0xbb, 0x9b, 0x62, 0x64, 0x6a, 0xb5, 0x0e, 0x4f, 0x4e, 0xb5, 0x8d, 0x17, 0xa7, - 0xda, 0xc6, 0xcb, 0x53, 0x6d, 0xe3, 0xe7, 0x48, 0x53, 0x4e, 0x22, 0x4d, 0x79, 0x11, 0x69, 0xca, - 0xcb, 0x48, 0x53, 0xfe, 0x8c, 0x34, 0xe5, 0x97, 0xbf, 0xb4, 0x8d, 0xef, 0xda, 0x79, 0xff, 0xc6, - 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xc9, 0x34, 0x4c, 0x0a, 0x0e, 0x00, 0x00, + // 1041 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x73, 0xdb, 0xc4, + 0x1b, 0x8e, 0xe2, 0xf8, 0x17, 0x67, 0xed, 0x24, 0xcd, 0xfe, 0x80, 0x88, 0xd0, 0xb1, 0x3c, 0x3e, + 0x30, 0xbe, 0x20, 0xb5, 0x29, 0x03, 0xa5, 0x0c, 0x87, 0x2a, 0xb4, 0x03, 0x33, 0x49, 0x5a, 0x36, + 0xfd, 0x33, 0x03, 0x65, 0xa6, 0x6b, 0xf9, 0xb5, 0xbd, 0x58, 0xd2, 0x7a, 0xb4, 0xab, 0xb4, 0x19, + 0x2e, 0x7c, 0x04, 0xbe, 0x02, 0x1f, 0x84, 0x03, 0xb7, 0x1c, 0x7b, 0xec, 0x05, 0x0d, 0x11, 0x67, + 0x0e, 0x5c, 0x73, 0x62, 0xb4, 0x52, 0x6c, 0xcb, 0x76, 0x5a, 0x11, 0x66, 0x72, 0xca, 0xcd, 0xfb, + 0xbc, 0xfb, 0xbe, 0xcf, 0x3e, 0xab, 0x77, 0xdf, 0x67, 0x8c, 0xbe, 0x19, 0xdc, 0x16, 0x26, 0xe3, + 0xd6, 0x20, 0x6c, 0x43, 0xe0, 0x83, 0x04, 0x61, 0x1d, 0x82, 0xdf, 0xe1, 0x81, 0x95, 0x05, 0xe8, + 0x90, 0x59, 0xb4, 0xe3, 0x31, 0x21, 0x18, 0xf7, 0x03, 0xe8, 0x31, 0x21, 0x03, 0x2a, 0x19, 0xf7, + 0xad, 0xc3, 0x9b, 0x6d, 0x90, 0xf4, 0xa6, 0xd5, 0x03, 0x1f, 0x02, 0x2a, 0xa1, 0x63, 0x0e, 0x03, + 0x2e, 0x39, 0x6e, 0xa5, 0x99, 0x26, 0x1d, 0x32, 0x73, 0x6e, 0xa6, 0x99, 0x65, 0x6e, 0x7d, 0xd4, + 0x63, 0xb2, 0x1f, 0xb6, 0x4d, 0x87, 0x7b, 0x56, 0x8f, 0xf7, 0xb8, 0xa5, 0x0a, 0xb4, 0xc3, 0xae, + 0x5a, 0xa9, 0x85, 0xfa, 0x95, 0x16, 0xde, 0xba, 0x55, 0xe0, 0x48, 0xd3, 0xa7, 0xd9, 0xfa, 0x78, + 0x9c, 0xe4, 0x51, 0xa7, 0xcf, 0x7c, 0x08, 0x8e, 0xac, 0xe1, 0xa0, 0x97, 0x00, 0xc2, 0xf2, 0x40, + 0xd2, 0x79, 0x59, 0xd6, 0x79, 0x59, 0x41, 0xe8, 0x4b, 0xe6, 0xc1, 0x4c, 0xc2, 0x27, 0x6f, 0x4b, + 0x10, 0x4e, 0x1f, 0x3c, 0x3a, 0x9d, 0xd7, 0xec, 0xa2, 0xb5, 0x3d, 0x2a, 0x9d, 0xfe, 0x0e, 0xf7, + 0x3b, 0x2c, 0xd1, 0x80, 0x1b, 0x68, 0xc9, 0xa7, 0x1e, 0xe8, 0x5a, 0x43, 0x6b, 0xad, 0xd8, 0xb5, + 0xe3, 0xc8, 0x58, 0x88, 0x23, 0x63, 0x69, 0x9f, 0x7a, 0x40, 0x54, 0x04, 0x6f, 0x23, 0x04, 0x2f, + 0x87, 0x01, 0x28, 0xfd, 0xfa, 0xa2, 0xda, 0x87, 0xb3, 0x7d, 0xe8, 0xde, 0x28, 0x42, 0x26, 0x76, + 0x35, 0x7f, 0xab, 0xa0, 0xf5, 0xbd, 0x50, 0x52, 0xc9, 0xfc, 0xde, 0x53, 0x68, 0xf7, 0x39, 0x1f, + 0x14, 0x60, 0x7a, 0x81, 0x6a, 0x8e, 0xcb, 0xc0, 0x97, 0x3b, 0xdc, 0xef, 0xb2, 0x9e, 0xe2, 0xaa, + 0x6e, 0x7f, 0x61, 0x16, 0xfd, 0xc2, 0x66, 0x46, 0xb5, 0x33, 0x51, 0xc4, 0x7e, 0x27, 0x23, 0xaa, + 0x4d, 0xa2, 0x24, 0x47, 0x84, 0x9f, 0xa1, 0x72, 0x10, 0xba, 0x20, 0xf4, 0x52, 0xa3, 0xd4, 0xaa, + 0x6e, 0x7f, 0x5a, 0x84, 0xd1, 0x24, 0xa1, 0x0b, 0x4f, 0x99, 0xec, 0x3f, 0x18, 0x42, 0x0a, 0x0a, + 0x7b, 0x35, 0xe3, 0x2a, 0x27, 0x31, 0x41, 0xd2, 0xa2, 0x78, 0x17, 0xad, 0x76, 0x29, 0x73, 0xc3, + 0x00, 0x1e, 0x72, 0x97, 0x39, 0x47, 0xfa, 0x92, 0xba, 0x81, 0x0f, 0xe3, 0xc8, 0x58, 0xbd, 0x3f, + 0x19, 0x38, 0x8d, 0x8c, 0x8d, 0x1c, 0xf0, 0xe8, 0x68, 0x08, 0x24, 0x9f, 0x8c, 0xbf, 0x44, 0x55, + 0x2f, 0xf9, 0x84, 0x59, 0xad, 0x15, 0x55, 0xab, 0x19, 0x47, 0x46, 0x75, 0x6f, 0x0c, 0x9f, 0x46, + 0xc6, 0xfa, 0xc4, 0x52, 0xd5, 
0x99, 0x4c, 0xc3, 0x2f, 0xd1, 0x46, 0x72, 0xe5, 0x62, 0x48, 0x1d, + 0x38, 0x00, 0x17, 0x1c, 0xc9, 0x03, 0xbd, 0xac, 0xee, 0xfb, 0xd6, 0x84, 0xfa, 0x51, 0x73, 0x99, + 0xc3, 0x41, 0x2f, 0x01, 0x84, 0x99, 0xf4, 0x70, 0x22, 0x7f, 0x97, 0xb6, 0xc1, 0x3d, 0x4b, 0xb5, + 0xdf, 0x8d, 0x23, 0x63, 0x63, 0x7f, 0xba, 0x22, 0x99, 0x25, 0xc1, 0x1c, 0xad, 0xf1, 0xf6, 0x0f, + 0xe0, 0xc8, 0x11, 0x6d, 0xf5, 0xe2, 0xb4, 0x38, 0x8e, 0x8c, 0xb5, 0x07, 0xb9, 0x72, 0x64, 0xaa, + 0x7c, 0x72, 0x61, 0x82, 0x75, 0xe0, 0x5e, 0xb7, 0x0b, 0x8e, 0x14, 0xfa, 0xff, 0xc6, 0x17, 0x76, + 0x30, 0x86, 0x93, 0x0b, 0x1b, 0x2f, 0x77, 0x5c, 0x2a, 0x04, 0x99, 0x4c, 0xc3, 0x77, 0xd0, 0x5a, + 0xf2, 0xb0, 0x78, 0x28, 0x0f, 0xc0, 0xe1, 0x7e, 0x47, 0xe8, 0xcb, 0x0d, 0xad, 0x55, 0x4e, 0x4f, + 0xf0, 0x28, 0x17, 0x21, 0x53, 0x3b, 0xf1, 0x63, 0xb4, 0x39, 0xea, 0x22, 0x02, 0x87, 0x0c, 0x5e, + 0x3c, 0x81, 0x20, 0x59, 0x08, 0xbd, 0xd2, 0x28, 0xb5, 0x56, 0xec, 0x0f, 0xe2, 0xc8, 0xd8, 0xbc, + 0x3b, 0x7f, 0x0b, 0x39, 0x2f, 0x17, 0x3f, 0x47, 0x38, 0x00, 0xe6, 0x1f, 0x72, 0x47, 0xb5, 0x5f, + 0xd6, 0x10, 0x48, 0xe9, 0xbb, 0x11, 0x47, 0x06, 0x26, 0x33, 0xd1, 0xd3, 0xc8, 0x78, 0x6f, 0x16, + 0x55, 0xed, 0x31, 0xa7, 0x16, 0xfe, 0x11, 0xad, 0x7b, 0xb9, 0x71, 0x21, 0xf4, 0x9a, 0x7a, 0x21, + 0xb7, 0x8b, 0xbf, 0xc9, 0xfc, 0xbc, 0xb1, 0x37, 0xb3, 0x27, 0xb2, 0x9e, 0xc7, 0x05, 0x99, 0x66, + 0x6a, 0xfe, 0xae, 0xa1, 0xeb, 0x53, 0x33, 0x24, 0x7d, 0xae, 0x61, 0xca, 0x80, 0x9f, 0xa3, 0x4a, + 0xd2, 0x15, 0x1d, 0x2a, 0xa9, 0x1a, 0x2a, 0xd5, 0xed, 0x1b, 0xc5, 0x7a, 0x28, 0x6d, 0x98, 0x3d, + 0x90, 0x74, 0x3c, 0xc8, 0xc6, 0x18, 0x19, 0x55, 0xc5, 0xdf, 0xa1, 0x4a, 0xc6, 0x2c, 0xf4, 0x45, + 0x25, 0xfc, 0xb3, 0x7f, 0x21, 0x3c, 0x7f, 0x76, 0x7b, 0x29, 0xa1, 0x22, 0xa3, 0x82, 0xcd, 0xbf, + 0x34, 0xd4, 0x78, 0x93, 0xbe, 0x5d, 0x26, 0x24, 0x7e, 0x36, 0xa3, 0xd1, 0x2c, 0xf8, 0x4e, 0x98, + 0x48, 0x15, 0x5e, 0xcb, 0x14, 0x56, 0xce, 0x90, 0x09, 0x7d, 0x03, 0x54, 0x66, 0x12, 0xbc, 0x33, + 0x71, 0xf7, 0x2f, 0x2c, 0x2e, 0x77, 0xf0, 0xf1, 0x18, 0xfc, 0x3a, 0x29, 0x4e, 0x52, 0x8e, 0xe6, + 0x2f, 0x1a, 0xba, 0x76, 0x00, 0xc1, 0x21, 0x73, 0x80, 0x40, 0x17, 0x02, 0xf0, 0x1d, 0xc0, 0x16, + 0x5a, 0x19, 0x8d, 0x88, 0xcc, 0x19, 0x36, 0xb2, 0xec, 0x95, 0xd1, 0x38, 0x21, 0xe3, 0x3d, 0x23, + 0x17, 0x59, 0x3c, 0xd7, 0x45, 0xae, 0xa3, 0xa5, 0x21, 0x95, 0x7d, 0xbd, 0xa4, 0x76, 0x54, 0x92, + 0xe8, 0x43, 0x2a, 0xfb, 0x44, 0xa1, 0x2a, 0xca, 0x03, 0xa9, 0x66, 0x70, 0x39, 0x8b, 0xf2, 0x40, + 0x12, 0x85, 0x36, 0x4f, 0x96, 0xd1, 0xc6, 0x13, 0xea, 0xb2, 0xce, 0x95, 0x73, 0x5d, 0x39, 0xd7, + 0xdb, 0x9d, 0x0b, 0x5d, 0x39, 0xd7, 0x85, 0x9c, 0x6b, 0x8e, 0xaf, 0x54, 0x2f, 0xcd, 0x57, 0x4e, + 0x34, 0x54, 0x9f, 0x79, 0xe3, 0x97, 0xed, 0x2c, 0xdf, 0xcf, 0x38, 0xcb, 0xe7, 0xc5, 0xa5, 0xcf, + 0x9c, 0x7e, 0xc6, 0x5b, 0xfe, 0xd6, 0x50, 0xf3, 0xcd, 0x1a, 0x2f, 0xc1, 0x5d, 0xbc, 0xbc, 0xbb, + 0x7c, 0xf5, 0x1f, 0x04, 0x16, 0xf1, 0x97, 0x5f, 0x35, 0xf4, 0xff, 0x39, 0x63, 0x14, 0xbf, 0x8f, + 0x4a, 0x61, 0xe0, 0x66, 0x76, 0xb0, 0x1c, 0x47, 0x46, 0xe9, 0x31, 0xd9, 0x25, 0x09, 0x86, 0x29, + 0x5a, 0x16, 0xa9, 0x23, 0x65, 0xf2, 0xef, 0x14, 0x3f, 0xe3, 0xb4, 0x95, 0xd9, 0xd5, 0x38, 0x32, + 0x96, 0xcf, 0xd0, 0xb3, 0xba, 0xb8, 0x85, 0x2a, 0x0e, 0xb5, 0x43, 0xbf, 0xe3, 0xa6, 0x9e, 0x55, + 0xb3, 0x6b, 0xc9, 0x75, 0xed, 0xdc, 0x4d, 0x31, 0x32, 0x8a, 0xda, 0xfb, 0xc7, 0x27, 0xf5, 0x85, + 0x57, 0x27, 0xf5, 0x85, 0xd7, 0x27, 0xf5, 0x85, 0x9f, 0xe2, 0xba, 0x76, 0x1c, 0xd7, 0xb5, 0x57, + 0x71, 0x5d, 0x7b, 0x1d, 0xd7, 0xb5, 0x3f, 0xe2, 0xba, 0xf6, 0xf3, 0x9f, 0xf5, 0x85, 0x6f, 0x5b, + 0x45, 0xff, 0x28, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 
0x1f, 0xf5, 0x97, 0x1c, 0x6c, 0x0f, 0x00, + 0x00, +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { @@ -369,6 +436,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -626,6 +707,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -871,6 +966,19 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *MutatingWebhook) Size() (n int) { if m == nil { return 0 @@ -920,6 +1028,12 @@ func (m *MutatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1022,6 +1136,12 @@ func (m *ValidatingWebhook) Size() (n int) { l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1086,6 +1206,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} func (this *MutatingWebhook) String() string { if this == nil { return "nil" @@ -1095,6 +1226,11 @@ func (this *MutatingWebhook) String() string { repeatedStringForRules += fmt.Sprintf("%v", f) + 
"," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&MutatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1107,6 +1243,7 @@ func (this *MutatingWebhook) String() string { `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1165,6 +1302,11 @@ func (this *ValidatingWebhook) String() string { repeatedStringForRules += fmt.Sprintf("%v", f) + "," } repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" s := strings.Join([]string{`&ValidatingWebhook{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, @@ -1176,6 +1318,7 @@ func (this *ValidatingWebhook) String() string { `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, `}`, }, "") return s @@ -1232,6 +1375,120 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1616,6 +1873,40 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2389,6 +2680,40 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index c7016afbf4..cfd7592854 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -29,6 +29,35 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables 
from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1beta1"; +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { // The name of the admission webhook. @@ -177,6 +206,28 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. @@ -356,6 +407,28 @@ message ValidatingWebhook { // Default to `['v1beta1']`. 
// +optional repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 5fdf8e3fa7..82ee7df9ba 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -283,6 +283,28 @@ type ValidatingWebhook struct { // Default to `['v1beta1']`. // +optional AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,rep,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -433,6 +455,28 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. 
An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +featureGate=AdmissionWebhookMatchConditions + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,rep,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. @@ -531,3 +575,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index c57c5b7fa8..2c0a9f0117 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +50,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. 
Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -90,6 +101,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. 
If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.", } func (ValidatingWebhook) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index ced4af19c6..9c5299bdfa 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -27,6 +27,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -78,6 +94,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -229,6 +250,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go index 6de9342006..3b75fa65bc 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ServerStorageVersion = map[string]string{ diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 534b550fec..a7a7e7c547 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -127,6 +127,7 @@ message DaemonSetSpec { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template optional k8s.io.api.core.v1.PodTemplateSpec template = 2; @@ -277,6 +278,7 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. @@ -675,6 +677,7 @@ message StatefulSetSpec { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -735,7 +738,7 @@ message StatefulSetSpec { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 09766c2953..15dc3150a6 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -199,6 +199,7 @@ type StatefulSetSpec struct { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -259,7 +260,7 @@ type StatefulSetSpec struct { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -379,6 +380,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -638,6 +640,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 509bb11c50..6676da0640 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -347,7 +347,7 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\".", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", @@ -355,7 +355,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. 
The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 15fb1aa878..245ec30f42 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -47,10 +47,10 @@ message ControllerRevision { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Data is the serialized representation of the state. + // data is the serialized representation of the state. optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. optional int64 revision = 3; } @@ -128,17 +128,18 @@ message DeploymentRollback { // DeploymentSpec is the specification of the desired behavior of the Deployment. message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional optional int32 replicas = 1; - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. 
@@ -146,28 +147,28 @@ message DeploymentSpec { // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional optional int32 minReadySeconds = 5; - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. // +optional optional int32 revisionHistoryLimit = 6; - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional optional bool paused = 7; // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. // +optional optional RollbackConfig rollbackTo = 8; - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -178,15 +179,15 @@ message DeploymentSpec { // DeploymentStatus is the most recently observed status of the Deployment. message DeploymentStatus { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional optional int64 observedGeneration = 1; - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional optional int32 replicas = 2; - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional optional int32 updatedReplicas = 3; @@ -198,18 +199,18 @@ message DeploymentStatus { // +optional optional int32 availableReplicas = 4; - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional optional int32 unavailableReplicas = 5; - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge repeated DeploymentCondition conditions = 6; - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. 
The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. // +optional @@ -276,7 +277,7 @@ message RollingUpdateStatefulSetStrategy { // This is helpful in being able to do a canary based deployment. The default value is 0. optional int32 partition = 1; - // The maximum number of pods that can be unavailable during the update. + // maxUnavailable is the maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // Absolute number is calculated from percentage by rounding up. This can not be 0. // Defaults to 1. This field is alpha-level and is only honored by servers that enable the @@ -293,32 +294,32 @@ message Scale { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional map selector = 2; - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -398,13 +399,13 @@ message StatefulSetOrdinals { // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. message StatefulSetPersistentVolumeClaimRetentionPolicy { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. 
optional string whenDeleted = 1; - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. The // `Delete` policy causes the associated PVCs for any excess pods above @@ -475,7 +476,7 @@ message StatefulSetSpec { // StatefulSetSpec version. The default value is 10. optional int32 revisionHistoryLimit = 8; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional @@ -491,7 +492,7 @@ message StatefulSetSpec { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional optional StatefulSetOrdinals ordinals = 11; } @@ -531,13 +532,13 @@ message StatefulSetStatus { // +optional optional int32 collisionCount = 9; - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge repeated StatefulSetCondition conditions = 10; - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. // +optional optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 9100230905..59ed9c2ac3 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -31,21 +31,21 @@ const ( // ScaleSpec describes the attributes of a scale subresource type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. 
The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -68,11 +68,11 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -159,7 +159,7 @@ type RollingUpdateStatefulSetStrategy struct { // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. // This is helpful in being able to do a canary based deployment. The default value is 0. Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` - // The maximum number of pods that can be unavailable during the update. + // maxUnavailable is the maximum number of pods that can be unavailable during the update. // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). // Absolute number is calculated from percentage by rounding up. This can not be 0. // Defaults to 1. This field is alpha-level and is only honored by servers that enable the @@ -191,12 +191,12 @@ const ( // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. type StatefulSetPersistentVolumeClaimRetentionPolicy struct { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. The // `Delete` policy causes the associated PVCs for any excess pods above @@ -282,7 +282,7 @@ type StatefulSetSpec struct { // StatefulSetSpec version. The default value is 10. RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. 
// Defaults to 0 (pod will be considered available as soon as it is ready) // +optional @@ -298,7 +298,7 @@ type StatefulSetSpec struct { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -338,13 +338,13 @@ type StatefulSetStatus struct { // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } @@ -409,17 +409,18 @@ type Deployment struct { // DeploymentSpec is the specification of the desired behavior of the Deployment. type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -427,28 +428,28 @@ type DeploymentSpec struct { // +patchStrategy=retainKeys Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. 
// +optional RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. // +optional RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -547,15 +548,15 @@ type RollingUpdateDeployment struct { // DeploymentStatus is the most recently observed status of the Deployment. type DeploymentStatus struct { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` @@ -567,18 +568,18 @@ type DeploymentStatus struct { // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. 
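Not part of the diff: since the progressDeadlineSeconds comment above calls out the ProgressDeadlineExceeded reason surfaced on the Deployment's conditions, here is a small hypothetical helper against the same vendored apps/v1beta1 package; the `DeploymentProgressing` condition type is assumed from elsewhere in that package.

```go
package main

import (
	"fmt"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
)

// progressDeadlineExceeded reports whether the deployment controller has
// flagged the Progressing condition with the ProgressDeadlineExceeded reason
// described in the progressDeadlineSeconds comment above.
func progressDeadlineExceeded(status appsv1beta1.DeploymentStatus) bool {
	for _, cond := range status.Conditions {
		if cond.Type == appsv1beta1.DeploymentProgressing && cond.Reason == "ProgressDeadlineExceeded" {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(progressDeadlineExceeded(appsv1beta1.DeploymentStatus{})) // false: no conditions set
}
```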
// +optional @@ -660,10 +661,10 @@ type ControllerRevision struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Data is the serialized representation of the state. + // data is the serialized representation of the state. Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` } diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 00d6d18252..a62e9869d6 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "data": "Data is the serialized representation of the state.", - "revision": "Revision indicates the revision of the state represented by Data.", + "data": "data is the serialized representation of the state.", + "revision": "revision indicates the revision of the state represented by Data.", } func (ControllerRevision) SwaggerDoc() map[string]string { @@ -96,15 +96,15 @@ func (DeploymentRollback) SwaggerDoc() map[string]string { var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", - "template": "Template describes the pods that will be created.", + "replicas": "replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "selector is the label selector for pods. 
Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", - "paused": "Indicates that the deployment is paused.", - "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + "paused": "paused indicates that the deployment is paused.", + "rollbackTo": "DEPRECATED. rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", } func (DeploymentSpec) SwaggerDoc() map[string]string { @@ -113,14 +113,14 @@ func (DeploymentSpec) SwaggerDoc() map[string]string { var map_DeploymentStatus = map[string]string{ "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "observedGeneration": "observedGeneration is the generation observed by the deployment controller.", + "replicas": "replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.", "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Represents the latest available observations of a deployment's current state.", - "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Conditions represent the latest available observations of a deployment's current state.", + "collisionCount": "collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", } func (DeploymentStatus) SwaggerDoc() map[string]string { @@ -159,7 +159,7 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { var map_RollingUpdateStatefulSetStrategy = map[string]string{ "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. 
The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", + "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", } func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { @@ -169,8 +169,8 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -179,7 +179,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the number of observed instances of the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -188,9 +188,9 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "replicas": "replias is the actual number of observed instances of the scaled object.", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "targetSelector": "targetSelector is the label selector for pods that should match the replicas count. 
This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { @@ -239,8 +239,8 @@ func (StatefulSetOrdinals) SwaggerDoc() map[string]string { var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", - "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", - "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", + "whenDeleted": "whenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "whenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", } func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { @@ -257,9 +257,9 @@ var map_StatefulSetSpec = map[string]string{ "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -276,8 +276,8 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", + "conditions": "conditions represent the latest available observations of a statefulset's current state.", + "availableReplicas": "availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index af8c4fe417..ddbe354411 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -131,6 +131,7 @@ message DaemonSetSpec { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template optional k8s.io.api.core.v1.PodTemplateSpec template = 2; @@ -282,6 +283,7 @@ message DeploymentSpec { optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. 
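Not part of the diff: the hunks above add the same note to the Deployment and DaemonSet pod templates, namely that template.spec.restartPolicy may only be "Always". A minimal sketch of a conforming template using the vendored core/v1 types; the image name is a placeholder.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	tmpl := corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "demo"}},
		Spec: corev1.PodSpec{
			// Per the comments added in this diff, "Always" is the only value the
			// API server accepts here for Deployments, DaemonSets and StatefulSets.
			RestartPolicy: corev1.RestartPolicyAlways,
			Containers: []corev1.Container{
				{Name: "app", Image: "registry.example.com/demo:latest"}, // placeholder image
			},
		},
	}
	fmt.Println(tmpl.Spec.RestartPolicy)
}
```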
@@ -600,7 +602,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map<string, string> selector = 2; @@ -720,6 +722,7 @@ message StatefulSetSpec { // of the StatefulSet. Each pod will be named with the format // <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". optional k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -777,7 +780,7 @@ message StatefulSetSpec { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index dbe4d23bf1..a97ac6fcf0 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -43,7 +43,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -250,6 +250,7 @@ type StatefulSetSpec struct { // of the StatefulSet. Each pod will be named with the format // <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -307,7 +308,7 @@ type StatefulSetSpec struct { // default ordinals behavior assigns a "0" index to the first replica and // increments the index by one for each additional replica requested. Using // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is alpha. + // enabled, which is beta. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -429,6 +430,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. 
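Not part of the diff: the ordinals comments above change because the StatefulSetStartOrdinal feature gate graduated to beta. A hypothetical sketch against the vendored apps/v1beta2 types; the `Start` field of `StatefulSetOrdinals` is assumed from that package, since the hunks only show the wrapping field.

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	spec := appsv1beta2.StatefulSetSpec{
		// With StatefulSetStartOrdinal enabled (beta per this diff), a StatefulSet
		// named "web" with three replicas gets pods web-3, web-4 and web-5
		// instead of the default web-0, web-1 and web-2.
		Ordinals: &appsv1beta2.StatefulSetOrdinals{Start: 3},
	}
	fmt.Println(spec.Ordinals.Start)
}
```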
@@ -690,6 +692,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 1936a24672..d7e9209915 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. 
The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -313,7 +313,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -375,7 +375,7 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\".", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. 
A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", @@ -383,7 +383,7 @@ var map_StatefulSetSpec = map[string]string{ "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 5d37ac1f8d..b1a730b816 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
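Not part of the diff: the types_swagger_doc_generated.go changes above only rewrite the documentation strings returned by the generated SwaggerDoc() methods shown in the hunks. A minimal sketch of reading one of the updated entries at runtime:

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	// SwaggerDoc() returns the per-field documentation map maintained in
	// types_swagger_doc_generated.go; the "ordinals" entry now notes that the
	// StatefulSetStartOrdinal feature gate is beta.
	docs := appsv1beta2.StatefulSetSpec{}.SwaggerDoc()
	fmt.Println(docs["ordinals"])
}
```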
var map_BoundObjectReference = map[string]string{ diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto index 3198dce3bd..51d9252440 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto @@ -30,7 +30,8 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; option go_package = "k8s.io/api/authentication/v1alpha1"; // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. -// When using impersonation, users will receive the user info of the user being impersonated. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types.go b/vendor/k8s.io/api/authentication/v1alpha1/types.go index da65028cdd..1ee3612fbc 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/types.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/types.go @@ -25,10 +25,11 @@ import ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 +// +k8s:prerelease-lifecycle-gen:introduced=1.26 // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. -// When using impersonation, users will receive the user info of the user being impersonated. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. type SelfSubjectReview struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go index bc17c5f30d..1ffcc99e7d 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go @@ -24,11 +24,11 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_SelfSubjectReview = map[string]string{ - "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated.", + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "status": "Status is filled in by the server with the user attributes.", } diff --git a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go index b86dfbef69..62a70a781d 100644 --- a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -24,17 +24,17 @@ package v1alpha1 // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { - return 1, 25 + return 1, 26 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { - return 1, 28 + return 1, 29 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { - return 1, 31 + return 1, 32 } diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 1978dcf6ab..7f1d5ca6ce 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -72,10 +72,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{1} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{2} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{1} + return fileDescriptor_77c9b20d3ad27844, []int{3} } func (m *TokenReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +159,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{2} + return fileDescriptor_77c9b20d3ad27844, []int{4} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +187,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{3} + return fileDescriptor_77c9b20d3ad27844, []int{5} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +215,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{4} + return fileDescriptor_77c9b20d3ad27844, []int{6} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,6 +242,8 @@ var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReviewStatus") proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview") proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") @@ -198,49 +256,53 @@ func init() { } var fileDescriptor_77c9b20d3ad27844 = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4e, 0x13, 0x5f, - 0x14, 0x9e, 0xe9, 0x1f, 0xd2, 0xde, 0xfe, 0xfa, 0x13, 0x6f, 0x62, 0xd2, 0x34, 0x71, 0x0a, 0x75, - 0x43, 0x82, 0xdc, 0x11, 0x42, 0x90, 0xe0, 0x8a, 0x51, 0x42, 0x30, 0x21, 0x26, 0x57, 0x70, 0xa1, - 0x2e, 0xbc, 0x9d, 0x1e, 0xa6, 0x63, 0x9d, 0x3f, 0xb9, 0x73, 0xa7, 0xca, 0x8e, 0x47, 0x70, 0xe9, - 0xd2, 0xc4, 0x27, 0x71, 0xc7, 0x92, 0x25, 0x0b, 0xd3, 0xc8, 0xf8, 0x04, 0xbe, 0x81, 0xb9, 0x77, - 0x2e, 0x4c, 0x81, 0x68, 0x61, 0x37, 0xf7, 0x3b, 0xe7, 0xfb, 0xce, 0x39, 0xdf, 0xe9, 0x29, 0x7a, - 0x3e, 0x5c, 0x4f, 0x88, 0x1f, 0xd9, 0xc3, 0xb4, 0x07, 0x3c, 0x04, 0x01, 0x89, 0x3d, 0x82, 0xb0, - 
0x1f, 0x71, 0x5b, 0x07, 0x58, 0xec, 0xdb, 0x2c, 0x15, 0x03, 0x08, 0x85, 0xef, 0x32, 0xe1, 0x47, - 0xa1, 0x3d, 0x5a, 0xee, 0x81, 0x60, 0xcb, 0xb6, 0x07, 0x21, 0x70, 0x26, 0xa0, 0x4f, 0x62, 0x1e, - 0x89, 0x08, 0xcf, 0xe7, 0x14, 0xc2, 0x62, 0x9f, 0x5c, 0xa6, 0x10, 0x4d, 0x69, 0x2f, 0x79, 0xbe, - 0x18, 0xa4, 0x3d, 0xe2, 0x46, 0x81, 0xed, 0x45, 0x5e, 0x64, 0x2b, 0x66, 0x2f, 0x3d, 0x50, 0x2f, - 0xf5, 0x50, 0x5f, 0xb9, 0x62, 0x7b, 0xb5, 0x68, 0x22, 0x60, 0xee, 0xc0, 0x0f, 0x81, 0x1f, 0xda, - 0xf1, 0xd0, 0x93, 0x40, 0x62, 0x07, 0x20, 0x98, 0x3d, 0xba, 0xd6, 0x47, 0xdb, 0xfe, 0x1b, 0x8b, - 0xa7, 0xa1, 0xf0, 0x03, 0xb8, 0x46, 0x58, 0x9b, 0x46, 0x48, 0xdc, 0x01, 0x04, 0xec, 0x2a, 0xaf, - 0xfb, 0x18, 0xa1, 0xad, 0x4f, 0x82, 0xb3, 0x57, 0xec, 0x43, 0x0a, 0xb8, 0x83, 0xaa, 0xbe, 0x80, - 0x20, 0x69, 0x99, 0x73, 0xe5, 0x85, 0xba, 0x53, 0xcf, 0xc6, 0x9d, 0xea, 0x8e, 0x04, 0x68, 0x8e, - 0x6f, 0xd4, 0xbe, 0x7c, 0xed, 0x18, 0x47, 0x3f, 0xe6, 0x8c, 0xee, 0xb7, 0x12, 0x6a, 0xec, 0x45, - 0x43, 0x08, 0x29, 0x8c, 0x7c, 0xf8, 0x88, 0xdf, 0xa1, 0x9a, 0x1c, 0xa6, 0xcf, 0x04, 0x6b, 0x99, - 0x73, 0xe6, 0x42, 0x63, 0xe5, 0x11, 0x29, 0xcc, 0xbc, 0xe8, 0x89, 0xc4, 0x43, 0x4f, 0x02, 0x09, - 0x91, 0xd9, 0x64, 0xb4, 0x4c, 0x5e, 0xf4, 0xde, 0x83, 0x2b, 0x76, 0x41, 0x30, 0x07, 0x1f, 0x8f, - 0x3b, 0x46, 0x36, 0xee, 0xa0, 0x02, 0xa3, 0x17, 0xaa, 0x78, 0x0f, 0x55, 0x92, 0x18, 0xdc, 0x56, - 0x49, 0xa9, 0xaf, 0x90, 0xa9, 0xab, 0x22, 0x13, 0xfd, 0xbd, 0x8c, 0xc1, 0x75, 0xfe, 0xd3, 0xfa, - 0x15, 0xf9, 0xa2, 0x4a, 0x0d, 0xbf, 0x45, 0x33, 0x89, 0x60, 0x22, 0x4d, 0x5a, 0x65, 0xa5, 0xbb, - 0x7a, 0x4b, 0x5d, 0xc5, 0x75, 0xfe, 0xd7, 0xca, 0x33, 0xf9, 0x9b, 0x6a, 0xcd, 0xae, 0x8b, 0xee, - 0x5c, 0x69, 0x02, 0x3f, 0x40, 0x55, 0x21, 0x21, 0xe5, 0x52, 0xdd, 0x69, 0x6a, 0x66, 0x35, 0xcf, - 0xcb, 0x63, 0x78, 0x11, 0xd5, 0x59, 0xda, 0xf7, 0x21, 0x74, 0x21, 0x69, 0x95, 0xd4, 0x32, 0x9a, - 0xd9, 0xb8, 0x53, 0xdf, 0x3c, 0x07, 0x69, 0x11, 0xef, 0xfe, 0x36, 0xd1, 0xdd, 0x6b, 0x2d, 0xe1, - 0x27, 0xa8, 0x39, 0xd1, 0x3e, 0xf4, 0x55, 0xbd, 0x9a, 0x73, 0x4f, 0xd7, 0x6b, 0x6e, 0x4e, 0x06, - 0xe9, 0xe5, 0x5c, 0xbc, 0x8b, 0x2a, 0x69, 0x02, 0x5c, 0x7b, 0xbd, 0x78, 0x03, 0x4f, 0xf6, 0x13, - 0xe0, 0x3b, 0xe1, 0x41, 0x54, 0x98, 0x2c, 0x11, 0xaa, 0x64, 0x2e, 0x8f, 0x53, 0xf9, 0xf7, 0x38, - 0xd2, 0x20, 0xe0, 0x3c, 0xe2, 0x6a, 0x21, 0x13, 0x06, 0x6d, 0x49, 0x90, 0xe6, 0xb1, 0xee, 0xf7, - 0x12, 0xaa, 0x9d, 0x97, 0xc4, 0x0f, 0x51, 0x4d, 0x96, 0x09, 0x59, 0x00, 0xda, 0xd5, 0x59, 0x4d, - 0x52, 0x39, 0x12, 0xa7, 0x17, 0x19, 0xf8, 0x3e, 0x2a, 0xa7, 0x7e, 0x5f, 0x8d, 0x56, 0x77, 0x1a, - 0x3a, 0xb1, 0xbc, 0xbf, 0xf3, 0x8c, 0x4a, 0x1c, 0x77, 0xd1, 0x8c, 0xc7, 0xa3, 0x34, 0x96, 0x3f, - 0x08, 0xd9, 0x28, 0x92, 0x6b, 0xdd, 0x56, 0x08, 0xd5, 0x11, 0xfc, 0x06, 0x55, 0x41, 0x5e, 0x8d, - 0x9a, 0xa5, 0xb1, 0xb2, 0x76, 0x0b, 0x7f, 0x88, 0x3a, 0xb7, 0xad, 0x50, 0xf0, 0xc3, 0x89, 0xd1, - 0x24, 0x46, 0x73, 0xcd, 0xb6, 0xa7, 0x4f, 0x52, 0xe5, 0xe0, 0x59, 0x54, 0x1e, 0xc2, 0x61, 0x3e, - 0x16, 0x95, 0x9f, 0xf8, 0x29, 0xaa, 0x8e, 0xe4, 0xb5, 0xea, 0xe5, 0x2c, 0xdd, 0xa0, 0x78, 0x71, - 0xe2, 0x34, 0xe7, 0x6e, 0x94, 0xd6, 0x4d, 0x67, 0xfb, 0xf8, 0xcc, 0x32, 0x4e, 0xce, 0x2c, 0xe3, - 0xf4, 0xcc, 0x32, 0x8e, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x24, 0xb3, 0xcc, 0xd3, 0xcc, 0x32, - 0x7f, 0x66, 0x96, 0xf9, 0xf9, 0x97, 0x65, 0xbc, 0x9e, 0x9f, 0xfa, 0x2f, 0xfa, 0x27, 0x00, 0x00, - 0xff, 0xff, 0xb8, 0x72, 0x2c, 0x2c, 0x82, 0x05, 0x00, 0x00, + // 725 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4f, 0x4f, 0x13, 0x41, + 0x14, 0xef, 
0xf6, 0x0f, 0x69, 0xa7, 0x56, 0x61, 0x12, 0x23, 0x69, 0xe2, 0x16, 0x6a, 0x62, 0x48, + 0x80, 0x59, 0x21, 0x04, 0x09, 0x9e, 0x58, 0x25, 0x04, 0x13, 0x62, 0x32, 0x05, 0x0f, 0xea, 0xc1, + 0xe9, 0xf6, 0xb1, 0x5d, 0x4b, 0x77, 0x37, 0xbb, 0xb3, 0x55, 0x6e, 0x7c, 0x04, 0x8f, 0x1e, 0x4d, + 0xfc, 0x24, 0xde, 0x38, 0x72, 0xc4, 0xc4, 0x34, 0xb2, 0x7e, 0x02, 0xbf, 0x81, 0x99, 0xd9, 0x61, + 0xdb, 0x82, 0x14, 0xb8, 0x78, 0xdb, 0xf9, 0xcd, 0xfb, 0xfd, 0xde, 0x7b, 0xbf, 0xf7, 0x32, 0x8b, + 0x5e, 0x76, 0xd6, 0x42, 0xe2, 0x78, 0x46, 0x27, 0x6a, 0x42, 0xe0, 0x02, 0x87, 0xd0, 0xe8, 0x81, + 0xdb, 0xf2, 0x02, 0x43, 0x5d, 0x30, 0xdf, 0x31, 0x58, 0xc4, 0xdb, 0xe0, 0x72, 0xc7, 0x62, 0xdc, + 0xf1, 0x5c, 0xa3, 0xb7, 0xd4, 0x04, 0xce, 0x96, 0x0c, 0x1b, 0x5c, 0x08, 0x18, 0x87, 0x16, 0xf1, + 0x03, 0x8f, 0x7b, 0x78, 0x36, 0xa1, 0x10, 0xe6, 0x3b, 0x64, 0x94, 0x42, 0x14, 0xa5, 0xba, 0x68, + 0x3b, 0xbc, 0x1d, 0x35, 0x89, 0xe5, 0x75, 0x0d, 0xdb, 0xb3, 0x3d, 0x43, 0x32, 0x9b, 0xd1, 0xbe, + 0x3c, 0xc9, 0x83, 0xfc, 0x4a, 0x14, 0xab, 0x0b, 0xe3, 0x8a, 0xb8, 0x98, 0xbf, 0xba, 0x32, 0x88, + 0xee, 0x32, 0xab, 0xed, 0xb8, 0x10, 0x1c, 0x1a, 0x7e, 0xc7, 0x16, 0x40, 0x68, 0x74, 0x81, 0xb3, + 0x7f, 0xb1, 0x8c, 0xab, 0x58, 0x41, 0xe4, 0x72, 0xa7, 0x0b, 0x97, 0x08, 0xab, 0xd7, 0x11, 0x42, + 0xab, 0x0d, 0x5d, 0x76, 0x91, 0x57, 0x7f, 0x8a, 0xd0, 0xe6, 0x27, 0x1e, 0xb0, 0xd7, 0xec, 0x20, + 0x02, 0x5c, 0x43, 0x05, 0x87, 0x43, 0x37, 0x9c, 0xd6, 0x66, 0x72, 0x73, 0x25, 0xb3, 0x14, 0xf7, + 0x6b, 0x85, 0x6d, 0x01, 0xd0, 0x04, 0x5f, 0x2f, 0x7e, 0xf9, 0x5a, 0xcb, 0x1c, 0xfd, 0x9c, 0xc9, + 0xd4, 0x7f, 0x68, 0x68, 0xaa, 0x01, 0x07, 0xfb, 0x8d, 0xa8, 0xf9, 0x01, 0x2c, 0x4e, 0xa1, 0xe7, + 0xc0, 0x47, 0xfc, 0x1e, 0x15, 0x45, 0x4b, 0x2d, 0xc6, 0xd9, 0xb4, 0x36, 0xa3, 0xcd, 0x95, 0x97, + 0x9f, 0x90, 0xc1, 0x00, 0xd2, 0xca, 0x88, 0xdf, 0xb1, 0x05, 0x10, 0x12, 0x11, 0x4d, 0x7a, 0x4b, + 0xe4, 0x95, 0x54, 0xd9, 0x01, 0xce, 0x4c, 0x7c, 0xdc, 0xaf, 0x65, 0xe2, 0x7e, 0x0d, 0x0d, 0x30, + 0x9a, 0xaa, 0xe2, 0x26, 0x9a, 0x08, 0x39, 0xe3, 0x51, 0x38, 0x9d, 0x95, 0xfa, 0xeb, 0xe4, 0xda, + 0x01, 0x93, 0x4b, 0x75, 0x36, 0xa4, 0x82, 0x79, 0x57, 0x65, 0x9a, 0x48, 0xce, 0x54, 0x29, 0xd7, + 0x3d, 0xf4, 0xe0, 0x0a, 0x0a, 0xde, 0x45, 0xc5, 0x28, 0x84, 0x60, 0xdb, 0xdd, 0xf7, 0x54, 0x83, + 0x8f, 0xc7, 0x16, 0x40, 0xf6, 0x54, 0xb4, 0x39, 0xa9, 0x92, 0x15, 0xcf, 0x11, 0x9a, 0x2a, 0xd5, + 0xbf, 0x65, 0x51, 0x79, 0xd7, 0xeb, 0x80, 0xfb, 0xdf, 0x6c, 0xdc, 0x45, 0xf9, 0xd0, 0x07, 0x4b, + 0x99, 0xb8, 0x7c, 0x03, 0x13, 0x87, 0xea, 0x6b, 0xf8, 0x60, 0x99, 0x77, 0x94, 0x7e, 0x5e, 0x9c, + 0xa8, 0x54, 0xc3, 0xef, 0xd2, 0xe1, 0xe4, 0xa4, 0xee, 0xca, 0x2d, 0x75, 0xc7, 0x8f, 0xc5, 0x42, + 0xf7, 0x2e, 0x14, 0x81, 0x1f, 0xa1, 0x02, 0x17, 0x90, 0x74, 0xa9, 0x64, 0x56, 0x14, 0xb3, 0x90, + 0xc4, 0x25, 0x77, 0x78, 0x1e, 0x95, 0x58, 0xd4, 0x72, 0xc0, 0xb5, 0x40, 0x6c, 0x8d, 0xd8, 0xec, + 0x4a, 0xdc, 0xaf, 0x95, 0x36, 0xce, 0x41, 0x3a, 0xb8, 0xaf, 0xff, 0xd1, 0xd0, 0xd4, 0xa5, 0x92, + 0xf0, 0x33, 0x54, 0x19, 0x2a, 0x1f, 0x5a, 0x32, 0x5f, 0xd1, 0xbc, 0xaf, 0xf2, 0x55, 0x36, 0x86, + 0x2f, 0xe9, 0x68, 0x2c, 0xde, 0x41, 0x79, 0x31, 0x69, 0xe5, 0xf5, 0xfc, 0x0d, 0x3c, 0x49, 0x97, + 0x26, 0x35, 0x59, 0x20, 0x54, 0xca, 0x8c, 0xb6, 0x93, 0x1f, 0xdf, 0x8e, 0x30, 0x08, 0x82, 0xc0, + 0x0b, 0xe4, 0x40, 0x86, 0x0c, 0xda, 0x14, 0x20, 0x4d, 0xee, 0xea, 0xdf, 0xb3, 0x28, 0xdd, 0x4a, + 0xbc, 0x90, 0x6c, 0xb8, 0xcb, 0xba, 0xa0, 0x5c, 0x1d, 0xd9, 0x5c, 0x81, 0xd3, 0x34, 0x02, 0x3f, + 0x44, 0xb9, 0xc8, 0x69, 0xc9, 0xd6, 0x4a, 0x66, 0x59, 0x05, 0xe6, 0xf6, 0xb6, 0x5f, 0x50, 0x81, + 0xe3, 0x3a, 0x9a, 0xb0, 0x03, 0x2f, 
0xf2, 0xc5, 0x42, 0x88, 0x42, 0x91, 0x18, 0xeb, 0x96, 0x44, + 0xa8, 0xba, 0xc1, 0x6f, 0x51, 0x01, 0xc4, 0x13, 0x24, 0x7b, 0x29, 0x2f, 0xaf, 0xde, 0xc2, 0x1f, + 0x22, 0xdf, 0xae, 0x4d, 0x97, 0x07, 0x87, 0x43, 0xad, 0x09, 0x8c, 0x26, 0x9a, 0x55, 0x5b, 0xbd, + 0x6f, 0x32, 0x06, 0x4f, 0xa2, 0x5c, 0x07, 0x0e, 0x93, 0xb6, 0xa8, 0xf8, 0xc4, 0xcf, 0x51, 0xa1, + 0x27, 0x9e, 0x3e, 0x35, 0x9c, 0xc5, 0x1b, 0x24, 0x1f, 0xbc, 0x97, 0x34, 0xe1, 0xae, 0x67, 0xd7, + 0x34, 0x73, 0xeb, 0xf8, 0x4c, 0xcf, 0x9c, 0x9c, 0xe9, 0x99, 0xd3, 0x33, 0x3d, 0x73, 0x14, 0xeb, + 0xda, 0x71, 0xac, 0x6b, 0x27, 0xb1, 0xae, 0x9d, 0xc6, 0xba, 0xf6, 0x2b, 0xd6, 0xb5, 0xcf, 0xbf, + 0xf5, 0xcc, 0x9b, 0xd9, 0x6b, 0x7f, 0x60, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x19, 0x49, + 0x3f, 0xfd, 0x06, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -275,6 +337,82 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -517,6 +655,30 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TokenReview) Size() (n int) { if m == nil { return 0 @@ -603,6 +765,27 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` 
+ strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *TokenReview) String() string { if this == nil { return "nil" @@ -752,6 +935,205 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TokenReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index d1847a02e5..53b4635d7e 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.authentication.v1beta1; +import "k8s.io/api/authentication/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -37,6 +38,26 @@ message ExtraValue { repeated string items = 1; } +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; +} + // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator // plugin in the kube-apiserver. 
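For orientation, here is a minimal, self-contained sketch (illustrative only, not part of the vendored patch) of the `SelfSubjectReview` type that the hunks above introduce to `authentication/v1beta1`, round-tripped through the generated gogo-proto `Marshal`/`Unmarshal` helpers shown in `generated.pb.go`. The package aliases and field values are assumptions chosen for the example; only the types and methods come from the diff itself.

```go
package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
	authv1beta1 "k8s.io/api/authentication/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	in := authv1beta1.SelfSubjectReview{
		ObjectMeta: metav1.ObjectMeta{Name: "whoami"},
		Status: authv1beta1.SelfSubjectReviewStatus{
			// Status.UserInfo reuses the v1 UserInfo message, matching the new
			// k8s.io/api/authentication/v1 import added to generated.proto above.
			UserInfo: authv1.UserInfo{
				Username: "jane",
				Groups:   []string{"system:authenticated"},
			},
		},
	}

	// Marshal/Unmarshal are the generated proto helpers added in this bump.
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	out := authv1beta1.SelfSubjectReview{}
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Status.UserInfo.Username) // prints "jane"
}
```

As the lifecycle hunks further down indicate, this beta API was introduced in Kubernetes 1.27, which lines up with the supported-version bump elsewhere in this change.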
diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go index ed23e50f7e..075ee1263d 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/register.go +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -44,6 +44,7 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectReview{}, &TokenReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 08e1e09b64..5bce82e7cf 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" + v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -113,3 +114,29 @@ type ExtraValue []string func (t ExtraValue) String() string { return fmt.Sprintf("%v", []string(t)) } + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. + // +optional + UserInfo v1.UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 1086955c3a..d6644f2cf9 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,28 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + var map_TokenReview = map[string]string{ "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 059ec1a864..99ffadf7ba 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -45,6 +45,50 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. +func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. +func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go index e448106e41..904796925c 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,24 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *TokenReview) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 2e5fbea7ad..93229485cc 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 2d291189eb..e0846be7a4 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_LocalSubjectAccessReview = map[string]string{ diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 8cf997a757..1dbafd1a53 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -87,13 +87,13 @@ message ContainerResourceMetricStatus { // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -147,11 +147,11 @@ message HorizontalPodAutoscaler { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; - // current information about the autoscaler. + // status is the current information about the autoscaler. // +optional optional HorizontalPodAutoscalerStatus status = 3; } @@ -186,7 +186,7 @@ message HorizontalPodAutoscalerList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; } @@ -204,10 +204,10 @@ message HorizontalPodAutoscalerSpec { // +optional optional int32 minReplicas = 2; - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. optional int32 maxReplicas = 3; - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional optional int32 targetCPUUtilizationPercentage = 4; @@ -215,22 +215,22 @@ message HorizontalPodAutoscalerSpec { // current status of a horizontal pod autoscaler message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional optional int64 observedGeneration = 1; - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. 
// +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. optional int32 currentReplicas = 3; - // desired number of replicas of pods managed by this autoscaler. + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. optional int32 desiredReplicas = 4; - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. // +optional optional int32 currentCPUUtilizationPercentage = 5; @@ -264,7 +264,7 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -309,7 +309,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -464,31 +464,31 @@ message Scale { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource. message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. 
- // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional optional string selector = 2; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 6397430a22..4508290176 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -25,11 +25,13 @@ import ( // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -46,9 +48,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` @@ -56,22 +60,22 @@ type HorizontalPodAutoscalerSpec struct { // current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. 
CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - // desired number of replicas of pods managed by this autoscaler. + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. // +optional CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` @@ -87,11 +91,11 @@ type HorizontalPodAutoscaler struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current information about the autoscaler. + // status is the current information about the autoscaler. // +optional Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -105,7 +109,7 @@ type HorizontalPodAutoscalerList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -118,31 +122,31 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ScaleSpec describes the attributes of a scale subresource. type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. 
Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` } @@ -194,11 +198,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -206,7 +212,8 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -214,6 +221,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -231,6 +239,7 @@ type ObjectMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` @@ -239,6 +248,7 @@ type ObjectMetricSource struct { // When unset, just the metricName will be used to gather metrics. 
// +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -252,6 +262,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` @@ -273,11 +284,13 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -295,16 +308,19 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target. Container string `json:"container" protobuf:"bytes,5,opt,name=container"` } @@ -315,14 +331,17 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` + // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional @@ -341,11 +360,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). 
// +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -353,13 +374,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -390,15 +413,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -413,6 +440,7 @@ type ObjectMetricStatus struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` @@ -421,6 +449,7 @@ type ObjectMetricStatus struct { // When unset, just the metricName will be used to gather metrics. 
// +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -432,6 +461,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` @@ -451,6 +481,7 @@ type PodsMetricStatus struct { type ResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -458,6 +489,7 @@ type ResourceMetricStatus struct { // specification. // +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -473,6 +505,7 @@ type ResourceMetricStatus struct { type ContainerResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -480,11 +513,13 @@ type ContainerResourceMetricStatus struct { // specification. // +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling taget Container string `json:"container" protobuf:"bytes,4,opt,name=container"` } @@ -495,12 +530,14 @@ type ExternalMetricStatus struct { // metricName is the name of a metric used for autoscaling in // metric system. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` // currentValue is the current value of the metric (as a quantity) CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + // currentAverageValue is the current value of metric averaged over autoscaled pods. 
// +optional CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index ca288e9123..37c2b36a51 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -53,9 +53,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -89,8 +89,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current information about the autoscaler.", + "spec": "spec defines the behaviour of autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", } func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { @@ -113,7 +113,7 @@ func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerList = map[string]string{ "": "list of horizontal pod autoscaler objects.", "metadata": "Standard list metadata.", - "items": "list of horizontal pod autoscaler objects.", + "items": "items is the list of horizontal pod autoscaler objects.", } func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { @@ -124,8 +124,8 @@ var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", - "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", + "maxReplicas": "maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "targetCPUUtilizationPercentage": "targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { @@ -134,11 +134,11 @@ func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerStatus = map[string]string{ "": "current status of a horizontal pod autoscaler", - "observedGeneration": "most recent generation observed by this autoscaler.", - "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "current number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", - "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 
70 means that an average pod is using now 70% of its requested CPU.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is the current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", } func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { @@ -151,7 +151,7 @@ var map_MetricSpec = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -165,7 +165,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -246,8 +246,8 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -256,7 +256,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource.", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the desired number of instances for the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -265,8 +265,8 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "replicas": "replicas is the actual number of observed instances of the scaled object.", + "selector": "selector is the label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. 
The string will be in the same format as the query-param syntax. More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", } func (ScaleStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto index c08328023e..a9e36975fc 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. + // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -495,7 +495,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. 
message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go index 9b2dc36e3e..c12a83df1b 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -59,9 +59,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the @@ -83,11 +85,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -105,11 +109,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -117,6 +123,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -125,6 +132,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. 
// +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -144,6 +152,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. // If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -171,7 +180,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -179,10 +188,12 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value Max is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +listType=atomic @@ -203,12 +214,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). 
PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -249,8 +262,10 @@ const ( type ObjectMetricSource struct { // describedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +277,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +292,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +307,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +321,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +330,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +342,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. 
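[Reviewer note, not part of the patch] The autoscaling/v2 hunks above only reword field comments (metrics, containerResource, behavior, stabilizationWindowSeconds, selectPolicy, policies, MetricTarget). As orientation for how those documented fields fit together, here is a minimal, hedged sketch built with the vendored Go types; the target name, container name, and all numeric values are invented for illustration.

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	selectPolicy := autoscalingv2.MinChangePolicySelect

	spec := autoscalingv2.HorizontalPodAutoscalerSpec{
		// scaleTargetRef is a CrossVersionObjectReference: apiVersion, kind, name.
		ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
			Name:       "example-app", // hypothetical scale target
		},
		MinReplicas: int32Ptr(2),
		MaxReplicas: 10,
		// A containerResource metric scales on one container's CPU utilization
		// rather than the whole pod (the "containerResource" source documented above).
		Metrics: []autoscalingv2.MetricSpec{{
			Type: autoscalingv2.ContainerResourceMetricSourceType,
			ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
				Name:      corev1.ResourceCPU,
				Container: "app", // container name inside the target pods
				Target: autoscalingv2.MetricTarget{
					Type:               autoscalingv2.UtilizationMetricType,
					AverageUtilization: int32Ptr(70),
				},
			},
		}},
		// behavior.scaleDown exercises stabilizationWindowSeconds, selectPolicy
		// and a single HPAScalingPolicy (type / value / periodSeconds).
		Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
			ScaleDown: &autoscalingv2.HPAScalingRules{
				StabilizationWindowSeconds: int32Ptr(300), // documented scale-down default
				SelectPolicy:               &selectPolicy,
				Policies: []autoscalingv2.HPAScalingPolicy{{
					Type:          autoscalingv2.PodsScalingPolicy,
					Value:         2,  // remove at most 2 pods...
					PeriodSeconds: 60, // ...per 60-second window
				}},
			},
		},
	}
	fmt.Printf("max replicas: %d\n", spec.MaxReplicas)
}
```

The sketch only shows how the renamed/lower-cased field comments map onto the Go struct fields; it does not reflect any behavior change in this diff.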
@@ -405,15 +429,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -432,11 +460,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -444,6 +474,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -451,6 +482,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -465,8 +497,10 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // DescribedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"` } @@ -476,6 +510,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -486,8 +521,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. 
type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -498,11 +534,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -511,6 +549,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -520,10 +559,12 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go index 41ab32a4c7..1941b1ef57 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -288,7 +288,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 33d27a9622..6b3d415212 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -89,7 +89,7 @@ message CrossVersionObjectReference { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; // API version of the referent diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index c1480ab39f..842284072d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -26,7 +26,7 @@ import ( type CrossVersionObjectReference struct { // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // API version of the referent // +optional diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 6f555487dc..d656ee416d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. 
// Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -54,7 +54,7 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "name": "Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "apiVersion": "API version of the referent", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 1bafbf6c74..5b2fe9442a 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. + // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). 
optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -361,7 +361,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -411,7 +411,7 @@ message MetricValueStatus { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; - // currentAverageUtilization is the current value of the average of the + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional @@ -485,7 +485,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 60da3ba049..b0b7681c0e 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -62,9 +62,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the @@ -85,11 +87,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -107,11 +111,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -119,6 +125,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -127,6 +134,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -146,6 +154,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. // If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -173,7 +182,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -181,10 +190,12 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value MaxPolicySelect is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +optional @@ -204,12 +215,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -251,6 +264,7 @@ type ObjectMetricSource struct { DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +276,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +291,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +306,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +320,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +329,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +341,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. @@ -399,15 +422,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -426,6 +453,7 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. @@ -438,13 +466,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. 
// +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -459,6 +489,7 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` @@ -470,6 +501,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -480,8 +512,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -492,11 +525,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -505,6 +540,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -514,11 +550,13 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). 
// +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` - // currentAverageUtilization is the current value of the average of the + + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index cb92e9e345..4af7d0ec0d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type 
is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -203,7 +203,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -227,7 +227,7 @@ var map_MetricValueStatus = map[string]string{ "": "MetricValueStatus holds the current value for a metric", "value": "value is the current value of the metric (as a quantity).", "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - "averageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "averageUtilization": "averageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", } func (MetricValueStatus) SwaggerDoc() map[string]string { @@ -286,7 +286,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 5e8159731b..a21b5f1b37 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -72,7 +72,6 @@ message CronJobSpec { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional optional string timeZone = 8; @@ -83,6 +82,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -189,7 +189,7 @@ message JobSpec { optional int32 parallelism = 1; // Specifies the desired number of successfully finished pods the - // job should be run with. 
Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -243,6 +243,7 @@ message JobSpec { optional bool manualSelector = 5; // Describes the pod that will be created when executing a job. + // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ optional k8s.io.api.core.v1.PodTemplateSpec template = 6; @@ -256,7 +257,7 @@ message JobSpec { // +optional optional int32 ttlSecondsAfterFinished = 8; - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -281,7 +282,7 @@ message JobSpec { // +optional optional string completionMode = 9; - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -334,7 +335,7 @@ message JobStatus { // +optional optional int32 failed = 6; - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -344,15 +345,16 @@ message JobStatus { // +optional optional string completedIndexes = 7; - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. // // Old jobs might not be tracked using this field, in which case the field @@ -409,6 +411,7 @@ message PodFailurePolicyOnExitCodesRequirement { // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are // excluded from the requirement check. Possible values are: + // // - In: the requirement is satisfied if at least one container exit code // (might be multiple if there are multiple containers not restricted // by the 'containerName' field) is in the set of specified values. 
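[Reviewer note, not part of the patch] The batch/v1 hunks above likewise reword comments for completionMode, suspend, the pod template restartPolicy restriction, and the podFailurePolicy onExitCodes operator. For context only, a hedged sketch of a JobSpec that touches those fields with the vendored Go types; the image, command, and exit code are placeholders.

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

func int32Ptr(i int32) *int32 { return &i }
func boolPtr(b bool) *bool    { return &b }

func main() {
	indexed := batchv1.IndexedCompletion

	spec := batchv1.JobSpec{
		Parallelism: int32Ptr(3),
		Completions: int32Ptr(6),
		// completionMode "Indexed" gives each pod a completion index and
		// populates status.completedIndexes as described above.
		CompletionMode: &indexed,
		// suspend=true means the controller creates no pods until it is set back to false.
		Suspend:      boolPtr(true),
		BackoffLimit: int32Ptr(4),
		// Each podFailurePolicy rule uses onExitCodes or onPodConditions, not both.
		PodFailurePolicy: &batchv1.PodFailurePolicy{
			Rules: []batchv1.PodFailurePolicyRule{{
				Action: batchv1.PodFailurePolicyActionFailJob,
				OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
					Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
					Values:   []int32{42}, // hypothetical "unrecoverable" exit code
				},
			}},
		},
		// The pod template's restartPolicy must be "Never" or "OnFailure" for Jobs.
		Template: corev1.PodTemplateSpec{
			Spec: corev1.PodSpec{
				RestartPolicy: corev1.RestartPolicyNever,
				Containers: []corev1.Container{{
					Name:    "worker",
					Image:   "registry.example.com/worker:latest", // placeholder image
					Command: []string{"/bin/worker"},
				}},
			},
		},
	}
	fmt.Printf("completion mode: %s\n", *spec.CompletionMode)
}
```

Again, nothing in this diff changes Job semantics; the sketch only illustrates the fields whose documentation is being updated.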
@@ -442,10 +445,11 @@ message PodFailurePolicyOnPodConditionsPattern { } // PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. -// One of OnExitCodes and onPodConditions, but not both, can be used in each rule. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. message PodFailurePolicyRule { // Specifies the action taken on a pod failure when the requirements are satisfied. // Possible values are: + // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. // - Ignore: indicates that the counter towards the .backoffLimit is not @@ -471,12 +475,12 @@ message PodFailurePolicyRule { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. message UncountedTerminatedPods { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional repeated string succeeded = 1; - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional repeated string failed = 2; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index d298a02f25..c12e91b06c 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -23,8 +23,11 @@ import ( ) const ( - JobCompletionIndexAnnotation = "batch.kubernetes.io/job-completion-index" + // All Kubernetes labels need to be prefixed with Kubernetes to distinguish them from end-user labels + // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions + labelPrefix = "batch.kubernetes.io/" + JobCompletionIndexAnnotation = labelPrefix + "job-completion-index" // JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from // being deleted before being accounted in the Job status. // @@ -34,7 +37,14 @@ const ( // 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the // apiserver and job controller will ignore this annotation and they will // always track jobs using finalizers. - JobTrackingFinalizer = "batch.kubernetes.io/job-tracking" + JobTrackingFinalizer = labelPrefix + "job-tracking" + // The Job labels will use batch.kubernetes.io as a prefix for all labels + // Historically the job controller uses unprefixed labels for job-name and controller-uid and + // Kubernetes continutes to recognize those unprefixed labels for consistency. + JobNameLabel = labelPrefix + "job-name" + // ControllerUid is used to programatically get pods corresponding to a Job. + // There is a corresponding label without the batch.kubernetes.io that we support for legacy reasons. + ControllerUidLabel = labelPrefix + "controller-uid" ) // +genclient @@ -135,6 +145,7 @@ type PodFailurePolicyOnExitCodesRequirement struct { // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are // excluded from the requirement check. Possible values are: + // // - In: the requirement is satisfied if at least one container exit code // (might be multiple if there are multiple containers not restricted // by the 'containerName' field) is in the set of specified values. @@ -168,10 +179,11 @@ type PodFailurePolicyOnPodConditionsPattern struct { } // PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. 
-// One of OnExitCodes and onPodConditions, but not both, can be used in each rule. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. type PodFailurePolicyRule struct { // Specifies the action taken on a pod failure when the requirements are satisfied. // Possible values are: + // // - FailJob: indicates that the pod's job is marked as Failed and all // running pods are terminated. // - Ignore: indicates that the counter towards the .backoffLimit is not @@ -217,7 +229,7 @@ type JobSpec struct { Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -276,6 +288,7 @@ type JobSpec struct { ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` // Describes the pod that will be created when executing a job. + // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` @@ -289,7 +302,7 @@ type JobSpec struct { // +optional TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -314,7 +327,7 @@ type JobSpec struct { // +optional CompletionMode *CompletionMode `json:"completionMode,omitempty" protobuf:"bytes,9,opt,name=completionMode,casttype=CompletionMode"` - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -367,7 +380,7 @@ type JobStatus struct { // +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -377,15 +390,16 @@ type JobStatus struct { // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. 
When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. // // Old jobs might not be tracked using this field, in which case the field @@ -404,12 +418,12 @@ type JobStatus struct { // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. type UncountedTerminatedPods struct { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"` - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"` @@ -514,7 +528,6 @@ type CronJobSpec struct { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` @@ -525,6 +538,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index ab33a974cd..f6f3141f18 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,9 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. 
If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones This is beta field and must be enabled via the `CronJobTimeZone` feature gate.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.", @@ -113,16 +113,16 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", - "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", - "completionMode": "CompletionMode specifies how Pod completions are tracked. 
It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", - "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", + "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", + "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", } func (JobSpec) SwaggerDoc() map[string]string { @@ -137,8 +137,8 @@ var map_JobStatus = map[string]string{ "active": "The number of pending and running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", - "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. 
The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", + "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", } @@ -168,7 +168,7 @@ func (PodFailurePolicy) SwaggerDoc() map[string]string { var map_PodFailurePolicyOnExitCodesRequirement = map[string]string{ "": "PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check.", "containerName": "Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template.", - "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. 
Possible values are: - In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", + "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\n\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", "values": "Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.", } @@ -187,8 +187,8 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { } var map_PodFailurePolicyRule = map[string]string{ - "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of OnExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. 
Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", } @@ -199,8 +199,8 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string { var map_UncountedTerminatedPods = map[string]string{ "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", - "succeeded": "Succeeded holds UIDs of succeeded Pods.", - "failed": "Failed holds UIDs of failed Pods.", + "succeeded": "succeeded holds UIDs of succeeded Pods.", + "failed": "failed holds UIDs of failed Pods.", } func (UncountedTerminatedPods) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index d042fc6951..03feb2ceaf 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -157,38 +157,10 @@ func (m *CronJobStatus) XXX_DiscardUnknown() { var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{4} -} -func (m *JobTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobTemplate.Merge(m, src) -} -func (m *JobTemplate) XXX_Size() int { - return m.Size() -} -func (m *JobTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_JobTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_JobTemplate proto.InternalMessageInfo - func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } func (*JobTemplateSpec) ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{5} + return fileDescriptor_e57b277b05179ae7, []int{4} } func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +190,6 @@ func init() { proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1beta1.CronJobList") proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1beta1.CronJobSpec") proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1beta1.CronJobStatus") - proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } @@ -227,58 +198,57 @@ func init() { } var fileDescriptor_e57b277b05179ae7 = []byte{ - // 814 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0xbd, 0x4e, 0x9c, 0xb8, 0xe3, 0x16, 0xd2, 0x01, 0xa5, 0x2b, 0x83, 0xd6, 0xc1, 0x55, - 0x85, 0x41, 0x30, 0x4b, 0x22, 0x84, 0x38, 0x55, 0xea, 0x16, 0x15, 0x08, 0x41, 0x45, 0xe3, 0x22, - 0xa4, 0xaa, 0x42, 0x9d, 0x1d, 0xbf, 0x38, 0xd3, 0x78, 0x77, 0x56, 0x3b, 0xb3, 0x91, 0x72, 0xe3, - 0xc2, 0x9d, 0xef, 0xc2, 0x9d, 0x73, 0x8e, 0xbd, 0xd1, 0xd3, 0x8a, 0x2c, 0xdf, 0x82, 0x13, 0x9a, - 0xf1, 0x7a, 
0xed, 0xda, 0xeb, 0xa6, 0xbd, 0xf4, 0xe6, 0x79, 0xf3, 0xff, 0xff, 0xe6, 0xed, 0x7b, - 0x6f, 0x67, 0x8d, 0xee, 0x9d, 0x7e, 0xad, 0x88, 0x90, 0xfe, 0x69, 0x16, 0x42, 0x1a, 0x83, 0x06, - 0xe5, 0x9f, 0x41, 0x3c, 0x92, 0xa9, 0x5f, 0x6e, 0xb0, 0x44, 0xf8, 0x21, 0xd3, 0xfc, 0xc4, 0x3f, - 0xdb, 0x0f, 0x41, 0xb3, 0x7d, 0x7f, 0x0c, 0x31, 0xa4, 0x4c, 0xc3, 0x88, 0x24, 0xa9, 0xd4, 0x12, - 0xbb, 0x53, 0x25, 0x61, 0x89, 0x20, 0x56, 0x49, 0x4a, 0x65, 0xf7, 0xf3, 0xb1, 0xd0, 0x27, 0x59, - 0x48, 0xb8, 0x8c, 0xfc, 0xb1, 0x1c, 0x4b, 0xdf, 0x1a, 0xc2, 0xec, 0xd8, 0xae, 0xec, 0xc2, 0xfe, - 0x9a, 0x82, 0xba, 0xb7, 0x6b, 0x8e, 0x5c, 0x3e, 0xad, 0xdb, 0x5f, 0x10, 0x71, 0x99, 0x42, 0x9d, - 0xe6, 0xcb, 0xb9, 0x26, 0x62, 0xfc, 0x44, 0xc4, 0x90, 0x9e, 0xfb, 0xc9, 0xe9, 0xd8, 0x04, 0x94, - 0x1f, 0x81, 0x66, 0x75, 0x2e, 0x7f, 0x9d, 0x2b, 0xcd, 0x62, 0x2d, 0x22, 0x58, 0x31, 0x7c, 0x75, - 0x95, 0x41, 0xf1, 0x13, 0x88, 0xd8, 0xb2, 0xaf, 0xff, 0x7b, 0x13, 0x6d, 0xdf, 0x4f, 0x65, 0x7c, - 0x28, 0x43, 0xfc, 0x14, 0xb5, 0x4d, 0x3e, 0x23, 0xa6, 0x99, 0xeb, 0xec, 0x39, 0x83, 0xce, 0xc1, - 0x17, 0x64, 0x5e, 0xcf, 0x0a, 0x4b, 0x92, 0xd3, 0xb1, 0x09, 0x28, 0x62, 0xd4, 0xe4, 0x6c, 0x9f, - 0x3c, 0x0c, 0x9f, 0x01, 0xd7, 0x3f, 0x82, 0x66, 0x01, 0xbe, 0xc8, 0x7b, 0x8d, 0x22, 0xef, 0xa1, - 0x79, 0x8c, 0x56, 0x54, 0xfc, 0x2d, 0xda, 0x54, 0x09, 0x70, 0xb7, 0x69, 0xe9, 0x77, 0xc8, 0xba, - 0x6e, 0x91, 0x32, 0xa5, 0x61, 0x02, 0x3c, 0xb8, 0x5e, 0x22, 0x37, 0xcd, 0x8a, 0x5a, 0x00, 0x7e, - 0x88, 0xb6, 0x94, 0x66, 0x3a, 0x53, 0xee, 0x86, 0x45, 0x7d, 0x7c, 0x35, 0xca, 0xca, 0x83, 0x77, - 0x4a, 0xd8, 0xd6, 0x74, 0x4d, 0x4b, 0x4c, 0xff, 0x4f, 0x07, 0x75, 0x4a, 0xe5, 0x91, 0x50, 0x1a, - 0x3f, 0x59, 0xa9, 0x05, 0x79, 0xbd, 0x5a, 0x18, 0xb7, 0xad, 0xc4, 0x4e, 0x79, 0x52, 0x7b, 0x16, - 0x59, 0xa8, 0xc3, 0x03, 0xd4, 0x12, 0x1a, 0x22, 0xe5, 0x36, 0xf7, 0x36, 0x06, 0x9d, 0x83, 0x8f, - 0xae, 0xcc, 0x3e, 0xb8, 0x51, 0xd2, 0x5a, 0xdf, 0x1b, 0x1f, 0x9d, 0xda, 0xfb, 0x7f, 0x6f, 0x56, - 0x59, 0x9b, 0xe2, 0xe0, 0xcf, 0x50, 0xdb, 0xf4, 0x79, 0x94, 0x4d, 0xc0, 0x66, 0x7d, 0x6d, 0x9e, - 0xc5, 0xb0, 0x8c, 0xd3, 0x4a, 0x81, 0x07, 0xa8, 0x6d, 0x46, 0xe3, 0xb1, 0x8c, 0xc1, 0x6d, 0x5b, - 0xf5, 0x75, 0xa3, 0x7c, 0x54, 0xc6, 0x68, 0xb5, 0x8b, 0x7f, 0x46, 0xb7, 0x94, 0x66, 0xa9, 0x16, - 0xf1, 0xf8, 0x1b, 0x60, 0xa3, 0x89, 0x88, 0x61, 0x08, 0x5c, 0xc6, 0x23, 0x65, 0x5b, 0xb9, 0x11, - 0x7c, 0x50, 0xe4, 0xbd, 0x5b, 0xc3, 0x7a, 0x09, 0x5d, 0xe7, 0xc5, 0x4f, 0xd0, 0x4d, 0x2e, 0x63, - 0x9e, 0xa5, 0x29, 0xc4, 0xfc, 0xfc, 0x27, 0x39, 0x11, 0xfc, 0xdc, 0x36, 0xf4, 0x5a, 0x40, 0xca, - 0xbc, 0x6f, 0xde, 0x5f, 0x16, 0xfc, 0x57, 0x17, 0xa4, 0xab, 0x20, 0x7c, 0x07, 0x6d, 0xab, 0x4c, - 0x25, 0x10, 0x8f, 0xdc, 0xcd, 0x3d, 0x67, 0xd0, 0x0e, 0x3a, 0x45, 0xde, 0xdb, 0x1e, 0x4e, 0x43, - 0x74, 0xb6, 0x87, 0x9f, 0xa2, 0xce, 0x33, 0x19, 0x3e, 0x82, 0x28, 0x99, 0x30, 0x0d, 0x6e, 0xcb, - 0x36, 0xfb, 0x93, 0xf5, 0x1d, 0x39, 0x9c, 0x8b, 0xed, 0x78, 0xbe, 0x57, 0x66, 0xda, 0x59, 0xd8, - 0xa0, 0x8b, 0x48, 0xfc, 0x2b, 0xea, 0xaa, 0x8c, 0x73, 0x50, 0xea, 0x38, 0x9b, 0x1c, 0xca, 0x50, - 0x7d, 0x27, 0x94, 0x96, 0xe9, 0xf9, 0x91, 0x88, 0x84, 0x76, 0xb7, 0xf6, 0x9c, 0x41, 0x2b, 0xf0, - 0x8a, 0xbc, 0xd7, 0x1d, 0xae, 0x55, 0xd1, 0x57, 0x10, 0x30, 0x45, 0xbb, 0xc7, 0x4c, 0x4c, 0x60, - 0xb4, 0xc2, 0xde, 0xb6, 0xec, 0x6e, 0x91, 0xf7, 0x76, 0x1f, 0xd4, 0x2a, 0xe8, 0x1a, 0x67, 0xff, - 0xaf, 0x26, 0xba, 0xf1, 0xd2, 0x9b, 0x83, 0x7f, 0x40, 0x5b, 0x8c, 0x6b, 0x71, 0x66, 0x26, 0xcb, - 0x0c, 0xed, 0xed, 0xc5, 0x12, 0x99, 0xdb, 0x6f, 0x7e, 0x13, 0x50, 0x38, 0x06, 0xd3, 0x09, 0x98, - 0xbf, 0x6e, 0xf7, 0xac, 0x95, 0x96, 
0x08, 0x3c, 0x41, 0x3b, 0x13, 0xa6, 0xf4, 0x6c, 0x28, 0xcd, - 0xc8, 0xd9, 0x26, 0x75, 0x0e, 0x3e, 0x7d, 0xbd, 0xd7, 0xcc, 0x38, 0x82, 0xf7, 0x8b, 0xbc, 0xb7, - 0x73, 0xb4, 0xc4, 0xa1, 0x2b, 0x64, 0x9c, 0x22, 0x6c, 0x63, 0x55, 0x09, 0xed, 0x79, 0xad, 0x37, - 0x3e, 0x6f, 0xb7, 0xc8, 0x7b, 0xf8, 0x68, 0x85, 0x44, 0x6b, 0xe8, 0xfd, 0x0b, 0x07, 0x2d, 0x4e, - 0xc4, 0x5b, 0xb8, 0x5c, 0x7f, 0x41, 0x6d, 0x3d, 0x9b, 0xe2, 0xe6, 0x9b, 0x4e, 0x71, 0x75, 0x4f, - 0x54, 0x23, 0x5c, 0xc1, 0xcc, 0xdd, 0xf8, 0xee, 0x92, 0xfe, 0x2d, 0x3c, 0xce, 0xdd, 0x97, 0xbe, - 0x15, 0x1f, 0xd6, 0x3d, 0x0a, 0x79, 0xc5, 0x27, 0x22, 0xb8, 0x7b, 0x71, 0xe9, 0x35, 0x9e, 0x5f, - 0x7a, 0x8d, 0x17, 0x97, 0x5e, 0xe3, 0xb7, 0xc2, 0x73, 0x2e, 0x0a, 0xcf, 0x79, 0x5e, 0x78, 0xce, - 0x8b, 0xc2, 0x73, 0xfe, 0x29, 0x3c, 0xe7, 0x8f, 0x7f, 0xbd, 0xc6, 0x63, 0x77, 0xdd, 0x5f, 0x8b, - 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd7, 0xf2, 0x8b, 0xe9, 0x8e, 0x08, 0x00, 0x00, + // 787 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0xbd, 0x49, 0x9c, 0xb8, 0xe3, 0x16, 0xd2, 0x01, 0xa5, 0x2b, 0x83, 0xd6, 0xc1, 0x55, + 0x85, 0x41, 0x30, 0x4b, 0x22, 0x84, 0x38, 0x55, 0xea, 0x16, 0x15, 0x08, 0x41, 0x45, 0xe3, 0x72, + 0xa9, 0x2a, 0xd4, 0xd9, 0xd9, 0x17, 0x67, 0x9a, 0xdd, 0x9d, 0xd5, 0xce, 0x6c, 0xa4, 0xdc, 0xb8, + 0x70, 0xe7, 0xbb, 0x70, 0xe7, 0x9c, 0x63, 0x6f, 0xf4, 0xb4, 0x22, 0xcb, 0xb7, 0xe0, 0x84, 0x66, + 0xbc, 0xb1, 0x5d, 0x7b, 0xdd, 0x84, 0x4b, 0x6f, 0x9e, 0x37, 0xff, 0xff, 0x6f, 0x9e, 0xde, 0x7b, + 0xfb, 0x8c, 0x1e, 0x9c, 0x7c, 0xad, 0x88, 0x90, 0xfe, 0x49, 0x11, 0x42, 0x9e, 0x82, 0x06, 0xe5, + 0x9f, 0x42, 0x1a, 0xc9, 0xdc, 0xaf, 0x2f, 0x58, 0x26, 0xfc, 0x90, 0x69, 0x7e, 0xec, 0x9f, 0xee, + 0x85, 0xa0, 0xd9, 0x9e, 0x3f, 0x86, 0x14, 0x72, 0xa6, 0x21, 0x22, 0x59, 0x2e, 0xb5, 0xc4, 0xee, + 0x44, 0x49, 0x58, 0x26, 0x88, 0x55, 0x92, 0x5a, 0xd9, 0xfb, 0x7c, 0x2c, 0xf4, 0x71, 0x11, 0x12, + 0x2e, 0x13, 0x7f, 0x2c, 0xc7, 0xd2, 0xb7, 0x86, 0xb0, 0x38, 0xb2, 0x27, 0x7b, 0xb0, 0xbf, 0x26, + 0xa0, 0xde, 0xdd, 0x86, 0x27, 0x17, 0x5f, 0xeb, 0x0d, 0xe6, 0x44, 0x5c, 0xe6, 0xd0, 0xa4, 0xf9, + 0x72, 0xa6, 0x49, 0x18, 0x3f, 0x16, 0x29, 0xe4, 0x67, 0x7e, 0x76, 0x32, 0x36, 0x01, 0xe5, 0x27, + 0xa0, 0x59, 0x93, 0xcb, 0x5f, 0xe5, 0xca, 0x8b, 0x54, 0x8b, 0x04, 0x96, 0x0c, 0x5f, 0x5d, 0x65, + 0x50, 0xfc, 0x18, 0x12, 0xb6, 0xe8, 0x1b, 0xfc, 0xb6, 0x86, 0xb6, 0x1e, 0xe6, 0x32, 0x3d, 0x90, + 0x21, 0x7e, 0x8e, 0x3a, 0x26, 0x9f, 0x88, 0x69, 0xe6, 0x3a, 0xbb, 0xce, 0xb0, 0xbb, 0xff, 0x05, + 0x99, 0xd5, 0x73, 0x8a, 0x25, 0xd9, 0xc9, 0xd8, 0x04, 0x14, 0x31, 0x6a, 0x72, 0xba, 0x47, 0x1e, + 0x87, 0x2f, 0x80, 0xeb, 0x1f, 0x41, 0xb3, 0x00, 0x9f, 0x97, 0xfd, 0x56, 0x55, 0xf6, 0xd1, 0x2c, + 0x46, 0xa7, 0x54, 0xfc, 0x2d, 0xda, 0x50, 0x19, 0x70, 0x77, 0xcd, 0xd2, 0xef, 0x91, 0x55, 0xdd, + 0x22, 0x75, 0x4a, 0xa3, 0x0c, 0x78, 0x70, 0xb3, 0x46, 0x6e, 0x98, 0x13, 0xb5, 0x00, 0xfc, 0x18, + 0x6d, 0x2a, 0xcd, 0x74, 0xa1, 0xdc, 0x75, 0x8b, 0xfa, 0xf8, 0x6a, 0x94, 0x95, 0x07, 0xef, 0xd4, + 0xb0, 0xcd, 0xc9, 0x99, 0xd6, 0x98, 0xc1, 0x1f, 0x0e, 0xea, 0xd6, 0xca, 0x43, 0xa1, 0x34, 0x7e, + 0xb6, 0x54, 0x0b, 0x72, 0xbd, 0x5a, 0x18, 0xb7, 0xad, 0xc4, 0x76, 0xfd, 0x52, 0xe7, 0x32, 0x32, + 0x57, 0x87, 0x47, 0xa8, 0x2d, 0x34, 0x24, 0xca, 0x5d, 0xdb, 0x5d, 0x1f, 0x76, 0xf7, 0x3f, 0xba, + 0x32, 0xfb, 0xe0, 0x56, 0x4d, 0x6b, 0x7f, 0x6f, 0x7c, 0x74, 0x62, 0x1f, 0xfc, 0xb5, 0x31, 0xcd, + 0xda, 0x14, 0x07, 0x7f, 0x86, 0x3a, 0xa6, 0xcf, 0x51, 0x11, 0x83, 0xcd, 0xfa, 0xc6, 0x2c, 0x8b, + 0x51, 0x1d, 0xa7, 0x53, 
0x05, 0x1e, 0xa2, 0x8e, 0x19, 0x8d, 0xa7, 0x32, 0x05, 0xb7, 0x63, 0xd5, + 0x37, 0x8d, 0xf2, 0x49, 0x1d, 0xa3, 0xd3, 0x5b, 0xfc, 0x33, 0xba, 0xa3, 0x34, 0xcb, 0xb5, 0x48, + 0xc7, 0xdf, 0x00, 0x8b, 0x62, 0x91, 0xc2, 0x08, 0xb8, 0x4c, 0x23, 0x65, 0x5b, 0xb9, 0x1e, 0x7c, + 0x50, 0x95, 0xfd, 0x3b, 0xa3, 0x66, 0x09, 0x5d, 0xe5, 0xc5, 0xcf, 0xd0, 0x6d, 0x2e, 0x53, 0x5e, + 0xe4, 0x39, 0xa4, 0xfc, 0xec, 0x27, 0x19, 0x0b, 0x7e, 0x66, 0x1b, 0x7a, 0x23, 0x20, 0x75, 0xde, + 0xb7, 0x1f, 0x2e, 0x0a, 0xfe, 0x6d, 0x0a, 0xd2, 0x65, 0x10, 0xbe, 0x87, 0xb6, 0x54, 0xa1, 0x32, + 0x48, 0x23, 0x77, 0x63, 0xd7, 0x19, 0x76, 0x82, 0x6e, 0x55, 0xf6, 0xb7, 0x46, 0x93, 0x10, 0xbd, + 0xbc, 0xc3, 0xcf, 0x51, 0xf7, 0x85, 0x0c, 0x9f, 0x40, 0x92, 0xc5, 0x4c, 0x83, 0xdb, 0xb6, 0xcd, + 0xfe, 0x64, 0x75, 0x47, 0x0e, 0x66, 0x62, 0x3b, 0x9e, 0xef, 0xd5, 0x99, 0x76, 0xe7, 0x2e, 0xe8, + 0x3c, 0x12, 0xff, 0x82, 0x7a, 0xaa, 0xe0, 0x1c, 0x94, 0x3a, 0x2a, 0xe2, 0x03, 0x19, 0xaa, 0xef, + 0x84, 0xd2, 0x32, 0x3f, 0x3b, 0x14, 0x89, 0xd0, 0xee, 0xe6, 0xae, 0x33, 0x6c, 0x07, 0x5e, 0x55, + 0xf6, 0x7b, 0xa3, 0x95, 0x2a, 0xfa, 0x06, 0x02, 0xa6, 0x68, 0xe7, 0x88, 0x89, 0x18, 0xa2, 0x25, + 0xf6, 0x96, 0x65, 0xf7, 0xaa, 0xb2, 0xbf, 0xf3, 0xa8, 0x51, 0x41, 0x57, 0x38, 0x07, 0x7f, 0xae, + 0xa1, 0x5b, 0xaf, 0x7d, 0x39, 0xf8, 0x07, 0xb4, 0xc9, 0xb8, 0x16, 0xa7, 0x66, 0xb2, 0xcc, 0xd0, + 0xde, 0x9d, 0x2f, 0x91, 0xd9, 0x7e, 0xb3, 0x4d, 0x40, 0xe1, 0x08, 0x4c, 0x27, 0x60, 0xf6, 0xb9, + 0x3d, 0xb0, 0x56, 0x5a, 0x23, 0x70, 0x8c, 0xb6, 0x63, 0xa6, 0xf4, 0xe5, 0x50, 0x9a, 0x91, 0xb3, + 0x4d, 0xea, 0xee, 0x7f, 0x7a, 0xbd, 0xcf, 0xcc, 0x38, 0x82, 0xf7, 0xab, 0xb2, 0xbf, 0x7d, 0xb8, + 0xc0, 0xa1, 0x4b, 0x64, 0x9c, 0x23, 0x6c, 0x63, 0xd3, 0x12, 0xda, 0xf7, 0xda, 0xff, 0xfb, 0xbd, + 0x9d, 0xaa, 0xec, 0xe3, 0xc3, 0x25, 0x12, 0x6d, 0xa0, 0x9b, 0x85, 0xf2, 0xee, 0xc2, 0xa8, 0xbc, + 0x85, 0x05, 0x7b, 0xff, 0xb5, 0x05, 0xfb, 0x61, 0xd3, 0x14, 0x93, 0x37, 0xec, 0xd5, 0xe0, 0xfe, + 0xf9, 0x85, 0xd7, 0x7a, 0x79, 0xe1, 0xb5, 0x5e, 0x5d, 0x78, 0xad, 0x5f, 0x2b, 0xcf, 0x39, 0xaf, + 0x3c, 0xe7, 0x65, 0xe5, 0x39, 0xaf, 0x2a, 0xcf, 0xf9, 0xbb, 0xf2, 0x9c, 0xdf, 0xff, 0xf1, 0x5a, + 0x4f, 0xdd, 0x55, 0xff, 0xc7, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x61, 0x72, 0xc3, 0xe0, 0xc3, + 0x07, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -517,49 +487,6 @@ func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *JobTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -700,19 +627,6 @@ func (m *CronJobStatus) Size() (n int) { return n } -func (m *JobTemplate) Size() (n int) { - if m == nil { - return 0 
- } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *JobTemplateSpec) Size() (n int) { if m == nil { return 0 @@ -794,17 +708,6 @@ func (this *CronJobStatus) String() string { }, "") return s } -func (this *JobTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *JobTemplateSpec) String() string { if this == nil { return "nil" @@ -1507,122 +1410,6 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index d8386a8f51..ac774f19ad 100644 --- 
a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -73,7 +73,6 @@ message CronJobSpec { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional optional string timeZone = 8; @@ -84,6 +83,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -127,19 +127,6 @@ message CronJobStatus { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } -// JobTemplate describes a template for creating copies of a predefined pod. -message JobTemplate { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional JobTemplateSpec template = 2; -} - // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go index 226de49f4d..9382ca23f2 100644 --- a/vendor/k8s.io/api/batch/v1beta1/register.go +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -44,7 +44,6 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &JobTemplate{}, &CronJob{}, &CronJobList{}, ) diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index 4c0d69dd6b..976752a926 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -22,24 +22,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.8 -// +k8s:prerelease-lifecycle-gen:deprecated=1.22 - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. 
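The surrounding hunks document how `CronJobSpec.schedule`, `timeZone`, `concurrencyPolicy`, and `jobTemplate` interact, and the JobSpec doc change earlier restricts `template.spec.restartPolicy` to `Never` or `OnFailure`. As a rough sketch of those fields together — written against the stable `batch/v1` types rather than the deprecated `v1beta1` ones shown in this hunk, with the object name, image, and time zone as illustrative placeholders:

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Optional: leaving TimeZone nil falls back to the kube-controller-manager's time zone.
	tz := "Etc/UTC"
	cj := batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "nightly-report"}, // placeholder name
		Spec: batchv1.CronJobSpec{
			Schedule:          "0 2 * * *",
			TimeZone:          &tz,
			ConcurrencyPolicy: batchv1.ForbidConcurrent, // other values: AllowConcurrent, ReplaceConcurrent
			JobTemplate: batchv1.JobTemplateSpec{
				Spec: batchv1.JobSpec{
					Template: corev1.PodTemplateSpec{
						Spec: corev1.PodSpec{
							// Per the JobSpec doc change above, only Never or OnFailure are allowed here.
							RestartPolicy: corev1.RestartPolicyNever,
							Containers: []corev1.Container{{
								Name:  "report",
								Image: "registry.example.com/report:latest", // placeholder image
							}},
						},
					},
				},
			},
		},
	}
	fmt.Println(cj.Name, cj.Spec.Schedule)
}
```

Leaving `TimeZone` unset keeps the behaviour described in the doc text: the schedule is interpreted in the kube-controller-manager's time zone.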
@@ -113,7 +95,6 @@ type CronJobSpec struct { // configuration, the controller will stop creating new new Jobs and will create a system event with the // reason UnknownTimeZone. // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // This is beta field and must be enabled via the `CronJobTimeZone` feature gate. // +optional TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` @@ -124,6 +105,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index 5716bbb862..3b3eafe8cc 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,9 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones This is beta field and must be enabled via the `CronJobTimeZone` feature gate.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. 
More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", @@ -75,16 +75,6 @@ func (CronJobStatus) SwaggerDoc() map[string]string { return map_CronJobStatus } -var map_JobTemplate = map[string]string{ - "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (JobTemplate) SwaggerDoc() map[string]string { - return map_JobTemplate -} - var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index c3a3494c4a..2c8570332a 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -158,33 +158,6 @@ func (in *CronJobStatus) DeepCopy() *CronJobStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. -func (in *JobTemplate) DeepCopy() *JobTemplate { - if in == nil { - return nil - } - out := new(JobTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *JobTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go index 2836b3b014..b57e9f1b86 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go @@ -72,21 +72,3 @@ func (in *CronJobList) APILifecycleReplacement() schema.GroupVersionKind { func (in *CronJobList) APILifecycleRemoved() (major, minor int) { return 1, 25 } - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *JobTemplate) APILifecycleIntroduced() (major, minor int) { - return 1, 8 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *JobTemplate) APILifecycleDeprecated() (major, minor int) { - return 1, 22 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *JobTemplate) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go index af5efb5165..92b2018e76 100644 --- a/vendor/k8s.io/api/certificates/v1/types.go +++ b/vendor/k8s.io/api/certificates/v1/types.go @@ -274,8 +274,9 @@ type CertificateSigningRequestList struct { } // KeyUsage specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 // // +enum diff --git a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go index 0dc8a4c69b..4bdf39ebb3 100644 --- a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
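The `certificates/v1` hunk nearby only reflows the `KeyUsage` comment and its RFC 5280 references, but a small sketch helps place the type: a CSR spec lists `KeyUsage` values alongside a signer name. The signer and usages below are illustrative, not implied by this change:

```go
package main

import (
	"fmt"

	certificatesv1 "k8s.io/api/certificates/v1"
)

func main() {
	// Usages draw from the KeyUsage values defined in RFC 5280 sections 4.2.1.3 and 4.2.1.12,
	// as referenced by the reformatted comment in the hunk above.
	spec := certificatesv1.CertificateSigningRequestSpec{
		SignerName: "kubernetes.io/kube-apiserver-client", // illustrative signer
		Usages: []certificatesv1.KeyUsage{
			certificatesv1.UsageDigitalSignature,
			certificatesv1.UsageKeyEncipherment,
			certificatesv1.UsageClientAuth,
		},
	}
	fmt.Println(spec.Usages)
}
```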
var map_CertificateSigningRequest = map[string]string{ diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go b/vendor/k8s.io/api/certificates/v1alpha1/doc.go similarity index 61% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go rename to vendor/k8s.io/api/certificates/v1alpha1/doc.go index 17c60895f0..d83d0e8207 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go +++ b/vendor/k8s.io/api/certificates/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -/* -Package inject defines interfaces and functions for propagating dependencies from a ControllerManager to -the components registered with it. Dependencies are propagated to Reconciler, Source, EventHandler and Predicate -objects which implement the Injectable interfaces. -*/ -package inject +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=certificates.k8s.io + +package v1alpha1 // import "k8s.io/api/certificates/v1alpha1" diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..546ecbefbf --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go @@ -0,0 +1,831 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} } +func (*ClusterTrustBundle) ProtoMessage() {} +func (*ClusterTrustBundle) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{0} +} +func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundle.Merge(m, src) +} +func (m *ClusterTrustBundle) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundle) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo + +func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} } +func (*ClusterTrustBundleList) ProtoMessage() {} +func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{1} +} +func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleList.Merge(m, src) +} +func (m *ClusterTrustBundleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo + +func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} } +func (*ClusterTrustBundleSpec) ProtoMessage() {} +func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8915b0d419f9eda6, []int{2} +} +func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src) +} +func (m *ClusterTrustBundleSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle") + proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList") + proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1alpha1/generated.proto", fileDescriptor_8915b0d419f9eda6) +} + +var fileDescriptor_8915b0d419f9eda6 = []byte{ + // 448 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6b, 0x13, 0x41, + 0x14, 0xc7, 0x77, 
0x6a, 0x0b, 0xed, 0x44, 0x41, 0x56, 0x90, 0x90, 0xc3, 0x34, 0xe4, 0xd4, 0x8b, + 0x33, 0x26, 0x54, 0xe9, 0x79, 0x05, 0xa1, 0xe0, 0x0f, 0xd8, 0x7a, 0xb1, 0x78, 0x70, 0x32, 0x79, + 0xdd, 0x8c, 0xc9, 0xee, 0x0e, 0x33, 0xb3, 0x01, 0x6f, 0x82, 0xff, 0x80, 0x7f, 0x56, 0x8e, 0xd5, + 0x53, 0x4f, 0xc5, 0xac, 0xff, 0x88, 0xcc, 0x64, 0x93, 0x5d, 0x5c, 0x25, 0xd2, 0xdb, 0xbe, 0x1f, + 0x9f, 0xef, 0x7b, 0xdf, 0xb7, 0x0c, 0x3e, 0x9f, 0x9d, 0x19, 0x2a, 0x73, 0x36, 0x2b, 0xc6, 0xa0, + 0x33, 0xb0, 0x60, 0xd8, 0x02, 0xb2, 0x49, 0xae, 0x59, 0x55, 0xe0, 0x4a, 0x32, 0x01, 0xda, 0xca, + 0x2b, 0x29, 0xb8, 0x2f, 0x0f, 0xf9, 0x5c, 0x4d, 0xf9, 0x90, 0x25, 0x90, 0x81, 0xe6, 0x16, 0x26, + 0x54, 0xe9, 0xdc, 0xe6, 0x61, 0x7f, 0x4d, 0x50, 0xae, 0x24, 0x6d, 0x12, 0x74, 0x43, 0xf4, 0x9e, + 0x24, 0xd2, 0x4e, 0x8b, 0x31, 0x15, 0x79, 0xca, 0x92, 0x3c, 0xc9, 0x99, 0x07, 0xc7, 0xc5, 0x95, + 0x8f, 0x7c, 0xe0, 0xbf, 0xd6, 0x82, 0xbd, 0xd3, 0x7a, 0x85, 0x94, 0x8b, 0xa9, 0xcc, 0x40, 0x7f, + 0x66, 0x6a, 0x96, 0xb8, 0x84, 0x61, 0x29, 0x58, 0xce, 0x16, 0xad, 0x35, 0x7a, 0xec, 0x5f, 0x94, + 0x2e, 0x32, 0x2b, 0x53, 0x68, 0x01, 0xcf, 0x77, 0x01, 0x46, 0x4c, 0x21, 0xe5, 0x7f, 0x72, 0x83, + 0x1f, 0x08, 0x87, 0x2f, 0xe6, 0x85, 0xb1, 0xa0, 0xdf, 0xe9, 0xc2, 0xd8, 0xa8, 0xc8, 0x26, 0x73, + 0x08, 0x3f, 0xe2, 0x43, 0xb7, 0xda, 0x84, 0x5b, 0xde, 0x45, 0x7d, 0x74, 0xd2, 0x19, 0x3d, 0xa5, + 0xf5, 0x65, 0xb6, 0x13, 0xa8, 0x9a, 0x25, 0x2e, 0x61, 0xa8, 0xeb, 0xa6, 0x8b, 0x21, 0x7d, 0x3b, + 0xfe, 0x04, 0xc2, 0xbe, 0x06, 0xcb, 0xa3, 0x70, 0x79, 0x7b, 0x1c, 0x94, 0xb7, 0xc7, 0xb8, 0xce, + 0xc5, 0x5b, 0xd5, 0xf0, 0x12, 0xef, 0x1b, 0x05, 0xa2, 0xbb, 0xe7, 0xd5, 0xcf, 0xe8, 0xae, 0xbb, + 0xd3, 0xf6, 0x96, 0x17, 0x0a, 0x44, 0x74, 0xbf, 0x9a, 0xb2, 0xef, 0xa2, 0xd8, 0x6b, 0x0e, 0xbe, + 0x23, 0xfc, 0xb8, 0xdd, 0xfe, 0x4a, 0x1a, 0x1b, 0x7e, 0x68, 0x19, 0xa3, 0xff, 0x67, 0xcc, 0xd1, + 0xde, 0xd6, 0xc3, 0x6a, 0xe0, 0xe1, 0x26, 0xd3, 0x30, 0xf5, 0x1e, 0x1f, 0x48, 0x0b, 0xa9, 0xe9, + 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0xa7, 0x77, 0x71, 0x15, 0x3d, 0xa8, 0x06, 0x1c, 0x9c, 0x3b, + 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0x57, 0x4f, 0xce, 0x74, 0x38, 0xc2, 0xd8, 0xc8, 0x24, 0x03, + 0xfd, 0x86, 0xa7, 0xe0, 0x5d, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0x56, 0xe2, 0x46, 0x57, 0xf8, 0x0c, + 0x77, 0x6c, 0x2d, 0xe3, 0xff, 0xc2, 0x51, 0xf4, 0xa8, 0x82, 0x3a, 0x8d, 0x09, 0x71, 0xb3, 0x2f, + 0x7a, 0xb9, 0x5c, 0x91, 0xe0, 0x7a, 0x45, 0x82, 0x9b, 0x15, 0x09, 0xbe, 0x94, 0x04, 0x2d, 0x4b, + 0x82, 0xae, 0x4b, 0x82, 0x6e, 0x4a, 0x82, 0x7e, 0x96, 0x04, 0x7d, 0xfb, 0x45, 0x82, 0xcb, 0xfe, + 0xae, 0x67, 0xf7, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x05, 0xe9, 0xaa, 0x07, 0xb2, 0x03, 0x00, 0x00, +} + +func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*ClusterTrustBundleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterTrustBundleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TrustBundle) + copy(dAtA[i:], m.TrustBundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TrustBundle))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterTrustBundle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterTrustBundleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterTrustBundleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TrustBundle) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterTrustBundle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundle{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleList) String() string { + if this == nil { + 
return "nil" + } + repeatedStringForItems := "[]ClusterTrustBundle{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterTrustBundleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundleSpec{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterTrustBundle{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustBundle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto new file mode 100644 index 0000000000..b0ebc4bd45 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -0,0 +1,103 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.certificates.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/certificates/v1alpha1"; + +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +message ClusterTrustBundle { + // metadata contains the object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the signer (if any) and trust anchors. + optional ClusterTrustBundleSpec spec = 2; +} + +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +message ClusterTrustBundleList { + // metadata contains the list metadata. + // + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a collection of ClusterTrustBundle objects + repeated ClusterTrustBundle items = 2; +} + +// ClusterTrustBundleSpec contains the signer and trust anchors. +message ClusterTrustBundleSpec { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName= + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + optional string signerName = 1; + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. 
+ // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers. + // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data. + optional string trustBundle = 2; +} + diff --git a/vendor/k8s.io/api/certificates/v1alpha1/register.go b/vendor/k8s.io/api/certificates/v1alpha1/register.go new file mode 100644 index 0000000000..7288ed9a3e --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + localSchemeBuilder = &SchemeBuilder + + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterTrustBundle{}, + &ClusterTrustBundleList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go new file mode 100644 index 0000000000..1a9fda0112 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +type ClusterTrustBundle struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the signer (if any) and trust anchors. + Spec ClusterTrustBundleSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// ClusterTrustBundleSpec contains the signer and trust anchors. +type ClusterTrustBundleSpec struct { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName= + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,opt,name=signerName"` + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. + // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers. 
+ // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data. + TrustBundle string `json:"trustBundle" protobuf:"bytes,2,opt,name=trustBundle"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +type ClusterTrustBundleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the list metadata. + // + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a collection of ClusterTrustBundle objects + Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..bff649e3cb --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,60 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ClusterTrustBundle = map[string]string{ + "": "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. 
Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.", + "metadata": "metadata contains the object metadata.", + "spec": "spec contains the signer (if any) and trust anchors.", +} + +func (ClusterTrustBundle) SwaggerDoc() map[string]string { + return map_ClusterTrustBundle +} + +var map_ClusterTrustBundleList = map[string]string{ + "": "ClusterTrustBundleList is a collection of ClusterTrustBundle objects", + "metadata": "metadata contains the list metadata.", + "items": "items is a collection of ClusterTrustBundle objects", +} + +func (ClusterTrustBundleList) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleList +} + +var map_ClusterTrustBundleSpec = map[string]string{ + "": "ClusterTrustBundleSpec contains the signer and trust anchors.", + "signerName": "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName= verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.", + "trustBundle": "trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.", +} + +func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..30a4dc1e80 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,102 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundle) DeepCopyInto(out *ClusterTrustBundle) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundle. +func (in *ClusterTrustBundle) DeepCopy() *ClusterTrustBundle { + if in == nil { + return nil + } + out := new(ClusterTrustBundle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundle) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleList) DeepCopyInto(out *ClusterTrustBundleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterTrustBundle, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleList. +func (in *ClusterTrustBundleList) DeepCopy() *ClusterTrustBundleList { + if in == nil { + return nil + } + out := new(ClusterTrustBundleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleSpec) DeepCopyInto(out *ClusterTrustBundleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleSpec. +func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec { + if in == nil { + return nil + } + out := new(ClusterTrustBundleSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 0000000000..dfafa656cc --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. 
+ +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index e246fba021..f70f01ef7a 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -124,8 +124,10 @@ message CertificateSigningRequestSpec { // allowedUsages specifies a set of usage contexts the key will be // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go index fe7aab9704..7e5a5c198a 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -89,8 +89,10 @@ type CertificateSigningRequestSpec struct { // allowedUsages specifies a set of usage contexts the key will be // valid for. 
- // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", @@ -229,8 +231,9 @@ type CertificateSigningRequestList struct { } // KeyUsages specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 type KeyUsage string diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index d3f318150c..f9ab1f13de 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ @@ -55,7 +55,7 @@ var map_CertificateSigningRequestSpec = map[string]string{ "request": "Base64-encoded PKCS#10 CSR data", "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See:\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.3\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.12\n\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", "username": "Information about the requesting user. See user.Info interface for details.", "uid": "UID information about the requesting user. See user.Info interface for details.", "groups": "Group information about the requesting user. See user.Info interface for details.", diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index b1efb737f0..36fce60f2d 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index 7a5605ace1..b0e1d06829 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -30,7 +30,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -43,7 +43,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -69,6 +69,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index 0f14404308..f3720eca02 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 85faa3b09b..92c8918b80 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -34,7 +34,7 @@ message Lease { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -47,7 +47,7 @@ message LeaseList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } @@ -59,7 +59,7 @@ message LeaseSpec { // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 8f300fca85..3a3d5f32e2 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -33,7 +33,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -46,7 +46,7 @@ type LeaseSpec struct { HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -75,6 +75,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index f557d265d4..78ca4e393f 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -50,7 +50,7 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index eb9517e1dd..61f86f850a 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file should be consistent with pkg/api/annotation_key_constants.go. +// This file should be consistent with pkg/apis/core/annotation_key_constants.go. package v1 @@ -144,8 +144,19 @@ const ( // This annotation is beta-level and is only honored when PodDeletionCost feature is enabled. PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost" - // AnnotationTopologyAwareHints can be used to enable or disable Topology - // Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any - // other value is treated as "Disabled". - AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + // DeprecatedAnnotationTopologyAwareHints can be used to enable or disable + // Topology Aware Hints for a Service. This may be set to "Auto" or + // "Disabled". Any other value is treated as "Disabled". This annotation has + // been deprecated in favor of the "service.kubernetes.io/topology-mode" + // annotation. 
+ DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + + // AnnotationTopologyMode can be used to enable or disable Topology Aware + // Routing for a Service. Well known values are "Auto" and "Disabled". + // Implementations may choose to develop new topology approaches, exposing + // them with domain-prefixed values. For example, "example.com/lowest-rtt" + // could be a valid implementation-specific value for this annotation. These + // heuristics will often populate topology hints on EndpointSlices, but that + // is not a requirement. + AnnotationTopologyMode = "service.kubernetes.io/topology-mode" ) diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index a8df2b222e..c766462960 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -889,10 +889,38 @@ func (m *ContainerPort) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerPort proto.InternalMessageInfo +func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } +func (*ContainerResizePolicy) ProtoMessage() {} +func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{30} +} +func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResizePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResizePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResizePolicy.Merge(m, src) +} +func (m *ContainerResizePolicy) XXX_Size() int { + return m.Size() +} +func (m *ContainerResizePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResizePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo + func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} + return fileDescriptor_83c10c24ec417dc9, []int{31} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +948,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} + return fileDescriptor_83c10c24ec417dc9, []int{32} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +976,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} + return fileDescriptor_83c10c24ec417dc9, []int{33} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1004,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} + return 
fileDescriptor_83c10c24ec417dc9, []int{34} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +1032,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} + return fileDescriptor_83c10c24ec417dc9, []int{35} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1060,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{35} + return fileDescriptor_83c10c24ec417dc9, []int{36} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1088,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} + return fileDescriptor_83c10c24ec417dc9, []int{37} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1116,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} + return fileDescriptor_83c10c24ec417dc9, []int{38} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1144,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} + return fileDescriptor_83c10c24ec417dc9, []int{39} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1172,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} + return fileDescriptor_83c10c24ec417dc9, []int{40} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1200,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} + return fileDescriptor_83c10c24ec417dc9, []int{41} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1228,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{41} + return fileDescriptor_83c10c24ec417dc9, []int{42} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1256,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{42} + return fileDescriptor_83c10c24ec417dc9, []int{43} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1284,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} + return fileDescriptor_83c10c24ec417dc9, []int{44} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1312,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} + return fileDescriptor_83c10c24ec417dc9, []int{45} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1340,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} + return fileDescriptor_83c10c24ec417dc9, []int{46} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1368,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} + return fileDescriptor_83c10c24ec417dc9, []int{47} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1396,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} + return fileDescriptor_83c10c24ec417dc9, []int{48} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1424,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} + return fileDescriptor_83c10c24ec417dc9, []int{49} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1452,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} + return fileDescriptor_83c10c24ec417dc9, []int{50} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1480,7 @@ var xxx_messageInfo_EphemeralContainerCommon 
proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_83c10c24ec417dc9, []int{51} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1508,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_83c10c24ec417dc9, []int{52} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1536,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_83c10c24ec417dc9, []int{53} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1564,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_83c10c24ec417dc9, []int{54} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1592,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_83c10c24ec417dc9, []int{55} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1620,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_83c10c24ec417dc9, []int{56} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1648,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_83c10c24ec417dc9, []int{57} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1676,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_83c10c24ec417dc9, []int{58} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1704,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{58} + return fileDescriptor_83c10c24ec417dc9, []int{59} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1732,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_83c10c24ec417dc9, []int{60} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1760,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_83c10c24ec417dc9, []int{61} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1788,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return fileDescriptor_83c10c24ec417dc9, []int{62} } func (m *GRPCAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1816,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_83c10c24ec417dc9, []int{63} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1844,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_83c10c24ec417dc9, []int{64} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1872,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_83c10c24ec417dc9, []int{65} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1900,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} + return fileDescriptor_83c10c24ec417dc9, []int{66} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +1928,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) 
ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} + return fileDescriptor_83c10c24ec417dc9, []int{67} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,7 +1956,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_83c10c24ec417dc9, []int{68} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +1984,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_83c10c24ec417dc9, []int{69} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +2012,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_83c10c24ec417dc9, []int{70} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +2040,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_83c10c24ec417dc9, []int{71} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2068,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_83c10c24ec417dc9, []int{72} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2096,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_83c10c24ec417dc9, []int{73} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2124,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return fileDescriptor_83c10c24ec417dc9, []int{74} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2152,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return 
fileDescriptor_83c10c24ec417dc9, []int{75} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2180,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_83c10c24ec417dc9, []int{76} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2208,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_83c10c24ec417dc9, []int{77} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2236,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return fileDescriptor_83c10c24ec417dc9, []int{78} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2264,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_83c10c24ec417dc9, []int{79} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2292,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} + return fileDescriptor_83c10c24ec417dc9, []int{80} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2320,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} + return fileDescriptor_83c10c24ec417dc9, []int{81} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2348,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} + return fileDescriptor_83c10c24ec417dc9, []int{82} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2348,7 +2376,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} + return fileDescriptor_83c10c24ec417dc9, []int{83} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 
+2404,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} + return fileDescriptor_83c10c24ec417dc9, []int{84} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2432,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} + return fileDescriptor_83c10c24ec417dc9, []int{85} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2460,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_83c10c24ec417dc9, []int{86} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2488,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} + return fileDescriptor_83c10c24ec417dc9, []int{87} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2516,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} + return fileDescriptor_83c10c24ec417dc9, []int{88} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2544,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{88} + return fileDescriptor_83c10c24ec417dc9, []int{89} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2572,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} + return fileDescriptor_83c10c24ec417dc9, []int{90} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2600,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{90} + return fileDescriptor_83c10c24ec417dc9, []int{91} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2628,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{91} + return fileDescriptor_83c10c24ec417dc9, []int{92} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2656,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} + return fileDescriptor_83c10c24ec417dc9, []int{93} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2684,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_83c10c24ec417dc9, []int{94} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2712,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_83c10c24ec417dc9, []int{95} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2740,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_83c10c24ec417dc9, []int{96} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2768,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_83c10c24ec417dc9, []int{97} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2796,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} + return fileDescriptor_83c10c24ec417dc9, []int{98} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2824,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_83c10c24ec417dc9, []int{99} } func (m *NodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2852,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_83c10c24ec417dc9, []int{100} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2880,7 @@ var 
xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return fileDescriptor_83c10c24ec417dc9, []int{101} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2908,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_83c10c24ec417dc9, []int{102} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2936,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} + return fileDescriptor_83c10c24ec417dc9, []int{103} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +2964,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_83c10c24ec417dc9, []int{104} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +2992,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_83c10c24ec417dc9, []int{105} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3020,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_83c10c24ec417dc9, []int{106} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3048,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_83c10c24ec417dc9, []int{107} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3076,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_83c10c24ec417dc9, []int{108} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3104,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = 
PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_83c10c24ec417dc9, []int{109} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3132,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_83c10c24ec417dc9, []int{110} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3160,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_83c10c24ec417dc9, []int{111} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3188,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_83c10c24ec417dc9, []int{112} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_83c10c24ec417dc9, []int{113} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_83c10c24ec417dc9, []int{114} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_83c10c24ec417dc9, []int{115} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func 
(*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_83c10c24ec417dc9, []int{116} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_83c10c24ec417dc9, []int{117} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{117} + return fileDescriptor_83c10c24ec417dc9, []int{118} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_83c10c24ec417dc9, []int{119} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_83c10c24ec417dc9, []int{120} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3440,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_83c10c24ec417dc9, []int{121} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3468,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_83c10c24ec417dc9, []int{122} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3496,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_83c10c24ec417dc9, []int{123} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3524,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = 
PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_83c10c24ec417dc9, []int{124} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3552,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_83c10c24ec417dc9, []int{125} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3580,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_83c10c24ec417dc9, []int{126} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3608,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_83c10c24ec417dc9, []int{127} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3636,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_83c10c24ec417dc9, []int{128} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3664,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_83c10c24ec417dc9, []int{129} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3692,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_83c10c24ec417dc9, []int{130} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3720,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_83c10c24ec417dc9, []int{131} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3748,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_83c10c24ec417dc9, []int{132} } func (m *PodLogOptions) 
XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3748,7 +3776,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
 func (m *PodOS) Reset() { *m = PodOS{} }
 func (*PodOS) ProtoMessage() {}
 func (*PodOS) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{132}
+	return fileDescriptor_83c10c24ec417dc9, []int{133}
 }
 func (m *PodOS) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3776,7 +3804,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo
 func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
 func (*PodPortForwardOptions) ProtoMessage() {}
 func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{133}
+	return fileDescriptor_83c10c24ec417dc9, []int{134}
 }
 func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3804,7 +3832,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
 func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
 func (*PodProxyOptions) ProtoMessage() {}
 func (*PodProxyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{134}
+	return fileDescriptor_83c10c24ec417dc9, []int{135}
 }
 func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3832,7 +3860,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
 func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
 func (*PodReadinessGate) ProtoMessage() {}
 func (*PodReadinessGate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{135}
+	return fileDescriptor_83c10c24ec417dc9, []int{136}
 }
 func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3860,7 +3888,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
 func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} }
 func (*PodResourceClaim) ProtoMessage() {}
 func (*PodResourceClaim) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{136}
+	return fileDescriptor_83c10c24ec417dc9, []int{137}
 }
 func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3888,7 +3916,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
 func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} }
 func (*PodSchedulingGate) ProtoMessage() {}
 func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{137}
+	return fileDescriptor_83c10c24ec417dc9, []int{138}
 }
 func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3916,7 +3944,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
 func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
 func (*PodSecurityContext) ProtoMessage() {}
 func (*PodSecurityContext) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{138}
+	return fileDescriptor_83c10c24ec417dc9, []int{139}
 }
 func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3944,7 +3972,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
 func (m *PodSignature) Reset() { *m = PodSignature{} }
 func (*PodSignature) ProtoMessage() {}
 func (*PodSignature) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{139}
+	return fileDescriptor_83c10c24ec417dc9, []int{140}
 }
 func (m *PodSignature) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3972,7 +4000,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo
 func (m *PodSpec) Reset() { *m = PodSpec{} }
 func (*PodSpec) ProtoMessage() {}
 func (*PodSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{140}
+	return fileDescriptor_83c10c24ec417dc9, []int{141}
 }
 func (m *PodSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4000,7 +4028,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo
 func (m *PodStatus) Reset() { *m = PodStatus{} }
 func (*PodStatus) ProtoMessage() {}
 func (*PodStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{141}
+	return fileDescriptor_83c10c24ec417dc9, []int{142}
 }
 func (m *PodStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4028,7 +4056,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo
 func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
 func (*PodStatusResult) ProtoMessage() {}
 func (*PodStatusResult) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{142}
+	return fileDescriptor_83c10c24ec417dc9, []int{143}
 }
 func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4056,7 +4084,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
 func (m *PodTemplate) Reset() { *m = PodTemplate{} }
 func (*PodTemplate) ProtoMessage() {}
 func (*PodTemplate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{143}
+	return fileDescriptor_83c10c24ec417dc9, []int{144}
 }
 func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4084,7 +4112,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
 func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
 func (*PodTemplateList) ProtoMessage() {}
 func (*PodTemplateList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{144}
+	return fileDescriptor_83c10c24ec417dc9, []int{145}
 }
 func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4112,7 +4140,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
 func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
 func (*PodTemplateSpec) ProtoMessage() {}
 func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{145}
+	return fileDescriptor_83c10c24ec417dc9, []int{146}
 }
 func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4140,7 +4168,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
 func (m *PortStatus) Reset() { *m = PortStatus{} }
 func (*PortStatus) ProtoMessage() {}
 func (*PortStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{146}
+	return fileDescriptor_83c10c24ec417dc9, []int{147}
 }
 func (m *PortStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4168,7 +4196,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo
 func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} }
 func (*PortworxVolumeSource) ProtoMessage() {}
 func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{147}
+	return fileDescriptor_83c10c24ec417dc9, []int{148}
 }
 func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4196,7 +4224,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
 func (m *Preconditions) Reset() { *m = Preconditions{} }
 func (*Preconditions) ProtoMessage() {}
 func (*Preconditions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{148}
+	return fileDescriptor_83c10c24ec417dc9, []int{149}
 }
 func (m *Preconditions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4224,7 +4252,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
 func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} }
 func (*PreferAvoidPodsEntry) ProtoMessage() {}
 func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{149}
+	return fileDescriptor_83c10c24ec417dc9, []int{150}
 }
 func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4252,7 +4280,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
 func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
 func (*PreferredSchedulingTerm) ProtoMessage() {}
 func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{150}
+	return fileDescriptor_83c10c24ec417dc9, []int{151}
 }
 func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4280,7 +4308,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
 func (m *Probe) Reset() { *m = Probe{} }
 func (*Probe) ProtoMessage() {}
 func (*Probe) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{151}
+	return fileDescriptor_83c10c24ec417dc9, []int{152}
 }
 func (m *Probe) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4308,7 +4336,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo
 func (m *ProbeHandler) Reset() { *m = ProbeHandler{} }
 func (*ProbeHandler) ProtoMessage() {}
 func (*ProbeHandler) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{152}
+	return fileDescriptor_83c10c24ec417dc9, []int{153}
 }
 func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4336,7 +4364,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
 func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} }
 func (*ProjectedVolumeSource) ProtoMessage() {}
 func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{153}
+	return fileDescriptor_83c10c24ec417dc9, []int{154}
 }
 func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4364,7 +4392,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
 func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} }
 func (*QuobyteVolumeSource) ProtoMessage() {}
 func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{154}
+	return fileDescriptor_83c10c24ec417dc9, []int{155}
 }
 func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4392,7 +4420,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
 func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} }
 func (*RBDPersistentVolumeSource) ProtoMessage() {}
 func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{155}
+	return fileDescriptor_83c10c24ec417dc9, []int{156}
 }
 func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4420,7 +4448,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
 func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
 func (*RBDVolumeSource) ProtoMessage() {}
 func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{156}
+	return fileDescriptor_83c10c24ec417dc9, []int{157}
 }
 func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4448,7 +4476,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
 func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
 func (*RangeAllocation) ProtoMessage() {}
 func (*RangeAllocation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{157}
+	return fileDescriptor_83c10c24ec417dc9, []int{158}
 }
 func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4476,7 +4504,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
 func (m *ReplicationController) Reset() { *m = ReplicationController{} }
 func (*ReplicationController) ProtoMessage() {}
 func (*ReplicationController) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{158}
+	return fileDescriptor_83c10c24ec417dc9, []int{159}
 }
 func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4504,7 +4532,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
 func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} }
 func (*ReplicationControllerCondition) ProtoMessage() {}
 func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{159}
+	return fileDescriptor_83c10c24ec417dc9, []int{160}
 }
 func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4532,7 +4560,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
 func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
 func (*ReplicationControllerList) ProtoMessage() {}
 func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{160}
+	return fileDescriptor_83c10c24ec417dc9, []int{161}
 }
 func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4560,7 +4588,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
 func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
 func (*ReplicationControllerSpec) ProtoMessage() {}
 func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{161}
+	return fileDescriptor_83c10c24ec417dc9, []int{162}
 }
 func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4588,7 +4616,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
 func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
 func (*ReplicationControllerStatus) ProtoMessage() {}
 func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{162}
+	return fileDescriptor_83c10c24ec417dc9, []int{163}
 }
 func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4616,7 +4644,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
 func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
 func (*ResourceClaim) ProtoMessage() {}
 func (*ResourceClaim) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{163}
+	return fileDescriptor_83c10c24ec417dc9, []int{164}
 }
 func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4644,7 +4672,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
 func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
 func (*ResourceFieldSelector) ProtoMessage() {}
 func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{164}
+	return fileDescriptor_83c10c24ec417dc9, []int{165}
 }
 func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4672,7 +4700,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
 func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
 func (*ResourceQuota) ProtoMessage() {}
 func (*ResourceQuota) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{165}
+	return fileDescriptor_83c10c24ec417dc9, []int{166}
 }
 func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4700,7 +4728,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
 func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
 func (*ResourceQuotaList) ProtoMessage() {}
 func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{166}
+	return fileDescriptor_83c10c24ec417dc9, []int{167}
 }
 func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4728,7 +4756,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
 func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
 func (*ResourceQuotaSpec) ProtoMessage() {}
 func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{167}
+	return fileDescriptor_83c10c24ec417dc9, []int{168}
 }
 func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4756,7 +4784,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
 func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
 func (*ResourceQuotaStatus) ProtoMessage() {}
 func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{168}
+	return fileDescriptor_83c10c24ec417dc9, []int{169}
 }
 func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4784,7 +4812,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
 func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
 func (*ResourceRequirements) ProtoMessage() {}
 func (*ResourceRequirements) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{169}
+	return fileDescriptor_83c10c24ec417dc9, []int{170}
 }
 func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4812,7 +4840,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
 func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
 func (*SELinuxOptions) ProtoMessage() {}
 func (*SELinuxOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{170}
+	return fileDescriptor_83c10c24ec417dc9, []int{171}
 }
 func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4840,7 +4868,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
 func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
 func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
 func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{171}
+	return fileDescriptor_83c10c24ec417dc9, []int{172}
 }
 func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4868,7 +4896,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
 func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
 func (*ScaleIOVolumeSource) ProtoMessage() {}
 func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{172}
+	return fileDescriptor_83c10c24ec417dc9, []int{173}
 }
 func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4896,7 +4924,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
 func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
 func (*ScopeSelector) ProtoMessage() {}
 func (*ScopeSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{173}
+	return fileDescriptor_83c10c24ec417dc9, []int{174}
 }
 func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4924,7 +4952,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
 func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
 func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
 func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{174}
+	return fileDescriptor_83c10c24ec417dc9, []int{175}
 }
 func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4952,7 +4980,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
 func (m *SeccompProfile) Reset() { *m = SeccompProfile{} }
 func (*SeccompProfile) ProtoMessage() {}
 func (*SeccompProfile) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{175}
+	return fileDescriptor_83c10c24ec417dc9, []int{176}
 }
 func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4980,7 +5008,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
 func (m *Secret) Reset() { *m = Secret{} }
 func (*Secret) ProtoMessage() {}
 func (*Secret) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{176}
+	return fileDescriptor_83c10c24ec417dc9, []int{177}
 }
 func (m *Secret) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5008,7 +5036,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
 func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
 func (*SecretEnvSource) ProtoMessage() {}
 func (*SecretEnvSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{177}
+	return fileDescriptor_83c10c24ec417dc9, []int{178}
 }
 func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5036,7 +5064,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
 func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
 func (*SecretKeySelector) ProtoMessage() {}
 func (*SecretKeySelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{178}
+	return fileDescriptor_83c10c24ec417dc9, []int{179}
 }
 func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5064,7 +5092,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
 func (m *SecretList) Reset() { *m = SecretList{} }
 func (*SecretList) ProtoMessage() {}
 func (*SecretList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{179}
+	return fileDescriptor_83c10c24ec417dc9, []int{180}
 }
 func (m *SecretList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5092,7 +5120,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
 func (m *SecretProjection) Reset() { *m = SecretProjection{} }
 func (*SecretProjection) ProtoMessage() {}
 func (*SecretProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{180}
+	return fileDescriptor_83c10c24ec417dc9, []int{181}
 }
 func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5120,7 +5148,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
 func (m *SecretReference) Reset() { *m = SecretReference{} }
 func (*SecretReference) ProtoMessage() {}
 func (*SecretReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{181}
+	return fileDescriptor_83c10c24ec417dc9, []int{182}
 }
 func (m *SecretReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5148,7 +5176,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
 func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
 func (*SecretVolumeSource) ProtoMessage() {}
 func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{182}
+	return fileDescriptor_83c10c24ec417dc9, []int{183}
 }
 func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5176,7 +5204,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
 func (m *SecurityContext) Reset() { *m = SecurityContext{} }
 func (*SecurityContext) ProtoMessage() {}
 func (*SecurityContext) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{183}
+	return fileDescriptor_83c10c24ec417dc9, []int{184}
 }
 func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5204,7 +5232,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
 func (m *SerializedReference) Reset() { *m = SerializedReference{} }
 func (*SerializedReference) ProtoMessage() {}
 func (*SerializedReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{184}
+	return fileDescriptor_83c10c24ec417dc9, []int{185}
 }
 func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5232,7 +5260,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
 func (m *Service) Reset() { *m = Service{} }
 func (*Service) ProtoMessage() {}
 func (*Service) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{185}
+	return fileDescriptor_83c10c24ec417dc9, []int{186}
 }
 func (m *Service) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5260,7 +5288,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
 func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
 func (*ServiceAccount) ProtoMessage() {}
 func (*ServiceAccount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{186}
+	return fileDescriptor_83c10c24ec417dc9, []int{187}
 }
 func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5288,7 +5316,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
 func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
 func (*ServiceAccountList) ProtoMessage() {}
 func (*ServiceAccountList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{187}
+	return fileDescriptor_83c10c24ec417dc9, []int{188}
 }
 func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5316,7 +5344,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
 func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
 func (*ServiceAccountTokenProjection) ProtoMessage() {}
 func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{188}
+	return fileDescriptor_83c10c24ec417dc9, []int{189}
 }
 func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5344,7 +5372,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
 func (m *ServiceList) Reset() { *m = ServiceList{} }
 func (*ServiceList) ProtoMessage() {}
 func (*ServiceList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{189}
+	return fileDescriptor_83c10c24ec417dc9, []int{190}
 }
 func (m *ServiceList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5372,7 +5400,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
 func (m *ServicePort) Reset() { *m = ServicePort{} }
 func (*ServicePort) ProtoMessage() {}
 func (*ServicePort) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{190}
+	return fileDescriptor_83c10c24ec417dc9, []int{191}
 }
 func (m *ServicePort) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5400,7 +5428,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
 func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
 func (*ServiceProxyOptions) ProtoMessage() {}
 func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{191}
+	return fileDescriptor_83c10c24ec417dc9, []int{192}
 }
 func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5428,7 +5456,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
 func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
 func (*ServiceSpec) ProtoMessage() {}
 func (*ServiceSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{192}
+	return fileDescriptor_83c10c24ec417dc9, []int{193}
 }
 func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5456,7 +5484,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
 func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
 func (*ServiceStatus) ProtoMessage() {}
 func (*ServiceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{193}
+	return fileDescriptor_83c10c24ec417dc9, []int{194}
 }
 func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5484,7 +5512,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
 func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
 func (*SessionAffinityConfig) ProtoMessage() {}
 func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{194}
+	return fileDescriptor_83c10c24ec417dc9, []int{195}
 }
 func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5512,7 +5540,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
 func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
 func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
 func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{195}
+	return fileDescriptor_83c10c24ec417dc9, []int{196}
 }
 func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5540,7 +5568,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
 func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
 func (*StorageOSVolumeSource) ProtoMessage() {}
 func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{196}
+	return fileDescriptor_83c10c24ec417dc9, []int{197}
 }
 func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5568,7 +5596,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
 func (m *Sysctl) Reset() { *m = Sysctl{} }
 func (*Sysctl) ProtoMessage() {}
 func (*Sysctl) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{197}
+	return fileDescriptor_83c10c24ec417dc9, []int{198}
 }
 func (m *Sysctl) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5596,7 +5624,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
 func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
 func (*TCPSocketAction) ProtoMessage() {}
 func (*TCPSocketAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{198}
+	return fileDescriptor_83c10c24ec417dc9, []int{199}
 }
 func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5624,7 +5652,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
 func (m *Taint) Reset() { *m = Taint{} }
 func (*Taint) ProtoMessage() {}
 func (*Taint) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{199}
+	return fileDescriptor_83c10c24ec417dc9, []int{200}
 }
 func (m *Taint) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5652,7 +5680,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
 func (m *Toleration) Reset() { *m = Toleration{} }
 func (*Toleration) ProtoMessage() {}
 func (*Toleration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{200}
+	return fileDescriptor_83c10c24ec417dc9, []int{201}
 }
 func (m *Toleration) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5680,7 +5708,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
 func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
 func (*TopologySelectorLabelRequirement) ProtoMessage() {}
 func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{201}
+	return fileDescriptor_83c10c24ec417dc9, []int{202}
 }
 func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5708,7 +5736,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
 func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
 func (*TopologySelectorTerm) ProtoMessage() {}
 func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{202}
+	return fileDescriptor_83c10c24ec417dc9, []int{203}
 }
 func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5736,7 +5764,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
 func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} }
 func (*TopologySpreadConstraint) ProtoMessage() {}
 func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{203}
+	return fileDescriptor_83c10c24ec417dc9, []int{204}
 }
 func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5764,7 +5792,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
 func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
 func (*TypedLocalObjectReference) ProtoMessage() {}
 func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{204}
+	return fileDescriptor_83c10c24ec417dc9, []int{205}
 }
 func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5792,7 +5820,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
 func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} }
 func (*TypedObjectReference) ProtoMessage() {}
 func (*TypedObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{205}
+	return fileDescriptor_83c10c24ec417dc9, []int{206}
 }
 func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5820,7 +5848,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
 func (m *Volume) Reset() { *m = Volume{} }
 func (*Volume) ProtoMessage() {}
 func (*Volume) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{206}
+	return fileDescriptor_83c10c24ec417dc9, []int{207}
 }
 func (m *Volume) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5848,7 +5876,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
 func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
 func (*VolumeDevice) ProtoMessage() {}
 func (*VolumeDevice) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{207}
+	return fileDescriptor_83c10c24ec417dc9, []int{208}
 }
 func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5876,7 +5904,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
 func (m *VolumeMount) Reset() { *m = VolumeMount{} }
 func (*VolumeMount) ProtoMessage() {}
 func (*VolumeMount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{208}
+	return fileDescriptor_83c10c24ec417dc9, []int{209}
 }
 func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5904,7 +5932,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
 func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
 func (*VolumeNodeAffinity) ProtoMessage() {}
 func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{209}
+	return fileDescriptor_83c10c24ec417dc9, []int{210}
 }
 func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5932,7 +5960,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
 func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
 func (*VolumeProjection) ProtoMessage() {}
 func (*VolumeProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{210}
+	return fileDescriptor_83c10c24ec417dc9, []int{211}
 }
 func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5960,7 +5988,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
 func (m *VolumeSource) Reset() { *m = VolumeSource{} }
 func (*VolumeSource) ProtoMessage() {}
 func (*VolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{211}
+	return fileDescriptor_83c10c24ec417dc9, []int{212}
 }
 func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5988,7 +6016,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
 func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
 func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
 func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{212}
+	return fileDescriptor_83c10c24ec417dc9, []int{213}
 }
 func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6016,7 +6044,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
 func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
 func (*WeightedPodAffinityTerm) ProtoMessage() {}
 func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{213}
+	return fileDescriptor_83c10c24ec417dc9, []int{214}
 }
 func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6044,7 +6072,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
 func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
 func (*WindowsSecurityContextOptions) ProtoMessage() {}
 func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{214}
+	return fileDescriptor_83c10c24ec417dc9, []int{215}
 }
 func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6104,11 +6132,13 @@ func init() {
 	proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container")
 	proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage")
 	proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort")
+	proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy")
 	proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState")
 	proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning")
 	proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated")
 	proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting")
 	proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry")
 	proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint")
 	proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection")
 	proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile")
@@ -6320,917 +6350,925 @@ func init() {
 }
 
 var fileDescriptor_83c10c24ec417dc9 = []byte{
-	// 14547 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x69, 0x8c, 0x24, 0xd7,
-	0x79, 0x98, 0xaa, 0x7b, 0xae, 0xfe, 0xe6, 0x7e, 0xb3, 0xbb, 0x9c, 0x1d, 0x72, 0x77, 0x96, 0x45,
-	0x72, 0xb9, 0x14, 0xc9, 0x19, 0x2d, 0x0f, 0x89, 0x26, 0x25, 0x5a, 0x73, 0xee, 0x36, 0x77, 0x67,
-	0xb6, 0xf9, 0x7a, 0x76, 0x57, 0x07, 0x25, 0xa8, 0xa6, 0xfb, 0xcd, 0x4c, 0x69, 0xba, 0xab, 0x9a,
-	0x55, 0xd5, 0xb3, 0x3b, 0x8c, 0x84, 0x38, 0xf2, 0x29, 0xdb, 0x09, 0x84, 0xc0, 0x39, 0x20, 0x1b,
-	0x46, 0xe0, 0x38, 0xb6, 0x15, 0xe5, 0x52, 0xe4, 0xd8, 0x8e, 0xe5, 0xd8, 0xce, 0xed, 0x04, 0x81,
-	0xe3, 0x18, 0x88, 0x65, 0xc0, 0xc8, 0xc4, 0x5e, 0x07, 0x30, 0x04, 0x24, 0xb6, 0x73, 0x01, 0xc9,
-	0xc4, 0x89, 0x83, 0x77, 0xd6, 0x7b, 0x75, 0x74, 0xf7, 0x2c, 0x67, 0x47, 0x94, 0xc0, 0x7f, 0xdd,
-	0xdf, 0xf7, 0xbd, 0xef, 0xbd, 0x7a, 0xe7, 0xf7, 0xbe, 0xef, 0x7b, 0xdf, 0x07, 0xaf, 0xec, 0xbe, 0x14, 0xce, 0xb9, 0xfe,
-	0xfc, 0x6e, 0x7b, 0x93, 0x04, 0x1e, 0x89, 0x48, 0x38, 0xbf,
0x47, 0xbc, - 0xba, 0x1f, 0xcc, 0x0b, 0x84, 0xd3, 0x72, 0xe7, 0x6b, 0x7e, 0x40, 0xe6, 0xf7, 0x2e, 0xcf, 0x6f, - 0x13, 0x8f, 0x04, 0x4e, 0x44, 0xea, 0x73, 0xad, 0xc0, 0x8f, 0x7c, 0x84, 0x38, 0xcd, 0x9c, 0xd3, - 0x72, 0xe7, 0x28, 0xcd, 0xdc, 0xde, 0xe5, 0x99, 0x67, 0xb7, 0xdd, 0x68, 0xa7, 0xbd, 0x39, 0x57, - 0xf3, 0x9b, 0xf3, 0xdb, 0xfe, 0xb6, 0x3f, 0xcf, 0x48, 0x37, 0xdb, 0x5b, 0xec, 0x1f, 0xfb, 0xc3, - 0x7e, 0x71, 0x16, 0x33, 0x2f, 0xc4, 0xd5, 0x34, 0x9d, 0xda, 0x8e, 0xeb, 0x91, 0x60, 0x7f, 0xbe, - 0xb5, 0xbb, 0xcd, 0xea, 0x0d, 0x48, 0xe8, 0xb7, 0x83, 0x1a, 0x49, 0x56, 0xdc, 0xb1, 0x54, 0x38, - 0xdf, 0x24, 0x91, 0x93, 0xd1, 0xdc, 0x99, 0xf9, 0xbc, 0x52, 0x41, 0xdb, 0x8b, 0xdc, 0x66, 0xba, - 0x9a, 0xf7, 0x77, 0x2b, 0x10, 0xd6, 0x76, 0x48, 0xd3, 0x49, 0x95, 0x7b, 0x3e, 0xaf, 0x5c, 0x3b, - 0x72, 0x1b, 0xf3, 0xae, 0x17, 0x85, 0x51, 0x90, 0x2c, 0x64, 0x7f, 0xdd, 0x82, 0x0b, 0x0b, 0xb7, - 0xab, 0x2b, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x62, 0xc3, 0xaf, 0xed, 0x56, 0x23, 0x3f, 0x20, 0xb7, - 0xfc, 0x46, 0xbb, 0x49, 0xaa, 0xac, 0x23, 0xd0, 0x33, 0x30, 0xb4, 0xc7, 0xfe, 0x97, 0x97, 0xa7, - 0xad, 0x0b, 0xd6, 0xa5, 0xd2, 0xe2, 0xc4, 0xaf, 0x1d, 0xcc, 0xbe, 0xe7, 0xde, 0xc1, 0xec, 0xd0, - 0x2d, 0x01, 0xc7, 0x8a, 0x02, 0x5d, 0x84, 0x81, 0xad, 0x70, 0x63, 0xbf, 0x45, 0xa6, 0x0b, 0x8c, - 0x76, 0x4c, 0xd0, 0x0e, 0xac, 0x56, 0x29, 0x14, 0x0b, 0x2c, 0x9a, 0x87, 0x52, 0xcb, 0x09, 0x22, - 0x37, 0x72, 0x7d, 0x6f, 0xba, 0x78, 0xc1, 0xba, 0xd4, 0xbf, 0x38, 0x29, 0x48, 0x4b, 0x15, 0x89, - 0xc0, 0x31, 0x0d, 0x6d, 0x46, 0x40, 0x9c, 0xfa, 0x0d, 0xaf, 0xb1, 0x3f, 0xdd, 0x77, 0xc1, 0xba, - 0x34, 0x14, 0x37, 0x03, 0x0b, 0x38, 0x56, 0x14, 0xf6, 0x17, 0x0b, 0x30, 0xb4, 0xb0, 0xb5, 0xe5, - 0x7a, 0x6e, 0xb4, 0x8f, 0x6e, 0xc1, 0x88, 0xe7, 0xd7, 0x89, 0xfc, 0xcf, 0xbe, 0x62, 0xf8, 0xb9, - 0x0b, 0x73, 0xe9, 0xa9, 0x34, 0xb7, 0xae, 0xd1, 0x2d, 0x4e, 0xdc, 0x3b, 0x98, 0x1d, 0xd1, 0x21, - 0xd8, 0xe0, 0x83, 0x30, 0x0c, 0xb7, 0xfc, 0xba, 0x62, 0x5b, 0x60, 0x6c, 0x67, 0xb3, 0xd8, 0x56, - 0x62, 0xb2, 0xc5, 0xf1, 0x7b, 0x07, 0xb3, 0xc3, 0x1a, 0x00, 0xeb, 0x4c, 0xd0, 0x26, 0x8c, 0xd3, - 0xbf, 0x5e, 0xe4, 0x2a, 0xbe, 0x45, 0xc6, 0xf7, 0xb1, 0x3c, 0xbe, 0x1a, 0xe9, 0xe2, 0xd4, 0xbd, - 0x83, 0xd9, 0xf1, 0x04, 0x10, 0x27, 0x19, 0xda, 0x6f, 0xc1, 0xd8, 0x42, 0x14, 0x39, 0xb5, 0x1d, - 0x52, 0xe7, 0x23, 0x88, 0x5e, 0x80, 0x3e, 0xcf, 0x69, 0x12, 0x31, 0xbe, 0x17, 0x44, 0xc7, 0xf6, - 0xad, 0x3b, 0x4d, 0x72, 0x78, 0x30, 0x3b, 0x71, 0xd3, 0x73, 0xdf, 0x6c, 0x8b, 0x59, 0x41, 0x61, - 0x98, 0x51, 0xa3, 0xe7, 0x00, 0xea, 0x64, 0xcf, 0xad, 0x91, 0x8a, 0x13, 0xed, 0x88, 0xf1, 0x46, - 0xa2, 0x2c, 0x2c, 0x2b, 0x0c, 0xd6, 0xa8, 0xec, 0xbb, 0x50, 0x5a, 0xd8, 0xf3, 0xdd, 0x7a, 0xc5, - 0xaf, 0x87, 0x68, 0x17, 0xc6, 0x5b, 0x01, 0xd9, 0x22, 0x81, 0x02, 0x4d, 0x5b, 0x17, 0x8a, 0x97, - 0x86, 0x9f, 0xbb, 0x94, 0xf9, 0xb1, 0x26, 0xe9, 0x8a, 0x17, 0x05, 0xfb, 0x8b, 0x0f, 0x89, 0xfa, - 0xc6, 0x13, 0x58, 0x9c, 0xe4, 0x6c, 0xff, 0xb3, 0x02, 0x9c, 0x5e, 0x78, 0xab, 0x1d, 0x90, 0x65, - 0x37, 0xdc, 0x4d, 0xce, 0xf0, 0xba, 0x1b, 0xee, 0xae, 0xc7, 0x3d, 0xa0, 0xa6, 0xd6, 0xb2, 0x80, - 0x63, 0x45, 0x81, 0x9e, 0x85, 0x41, 0xfa, 0xfb, 0x26, 0x2e, 0x8b, 0x4f, 0x9e, 0x12, 0xc4, 0xc3, - 0xcb, 0x4e, 0xe4, 0x2c, 0x73, 0x14, 0x96, 0x34, 0x68, 0x0d, 0x86, 0x6b, 0x6c, 0x41, 0x6e, 0xaf, - 0xf9, 0x75, 0xc2, 0x06, 0xb3, 0xb4, 0xf8, 0x34, 0x25, 0x5f, 0x8a, 0xc1, 0x87, 0x07, 0xb3, 0xd3, - 0xbc, 0x6d, 0x82, 0x85, 0x86, 0xc3, 0x7a, 0x79, 0x64, 0xab, 0xf5, 0xd5, 0xc7, 0x38, 0x41, 0xc6, - 0xda, 0xba, 0xa4, 0x2d, 0x95, 0x7e, 0xb6, 0x54, 0x46, 0xb2, 0x97, 0x09, 0xba, 0x0c, 0x7d, 0xbb, - 0xae, 0x57, 
0x9f, 0x1e, 0x60, 0xbc, 0xce, 0xd1, 0x31, 0xbf, 0xe6, 0x7a, 0xf5, 0xc3, 0x83, 0xd9, - 0x49, 0xa3, 0x39, 0x14, 0x88, 0x19, 0xa9, 0xfd, 0xdf, 0x2d, 0x98, 0x65, 0xb8, 0x55, 0xb7, 0x41, - 0x2a, 0x24, 0x08, 0xdd, 0x30, 0x22, 0x5e, 0x64, 0x74, 0xe8, 0x73, 0x00, 0x21, 0xa9, 0x05, 0x24, - 0xd2, 0xba, 0x54, 0x4d, 0x8c, 0xaa, 0xc2, 0x60, 0x8d, 0x8a, 0x6e, 0x08, 0xe1, 0x8e, 0x13, 0xb0, - 0xf9, 0x25, 0x3a, 0x56, 0x6d, 0x08, 0x55, 0x89, 0xc0, 0x31, 0x8d, 0xb1, 0x21, 0x14, 0xbb, 0x6d, - 0x08, 0xe8, 0x43, 0x30, 0x1e, 0x57, 0x16, 0xb6, 0x9c, 0x9a, 0xec, 0x40, 0xb6, 0x64, 0xaa, 0x26, - 0x0a, 0x27, 0x69, 0xed, 0xbf, 0x69, 0x89, 0xc9, 0x43, 0xbf, 0xfa, 0x1d, 0xfe, 0xad, 0xf6, 0x2f, - 0x58, 0x30, 0xb8, 0xe8, 0x7a, 0x75, 0xd7, 0xdb, 0x46, 0x9f, 0x82, 0x21, 0x7a, 0x36, 0xd5, 0x9d, - 0xc8, 0x11, 0xfb, 0xde, 0xfb, 0xb4, 0xb5, 0xa5, 0x8e, 0x8a, 0xb9, 0xd6, 0xee, 0x36, 0x05, 0x84, - 0x73, 0x94, 0x9a, 0xae, 0xb6, 0x1b, 0x9b, 0x9f, 0x26, 0xb5, 0x68, 0x8d, 0x44, 0x4e, 0xfc, 0x39, - 0x31, 0x0c, 0x2b, 0xae, 0xe8, 0x1a, 0x0c, 0x44, 0x4e, 0xb0, 0x4d, 0x22, 0xb1, 0x01, 0x66, 0x6e, - 0x54, 0xbc, 0x24, 0xa6, 0x2b, 0x92, 0x78, 0x35, 0x12, 0x1f, 0x0b, 0x1b, 0xac, 0x28, 0x16, 0x2c, - 0xec, 0xff, 0x3b, 0x08, 0x67, 0x97, 0xaa, 0xe5, 0x9c, 0x79, 0x75, 0x11, 0x06, 0xea, 0x81, 0xbb, - 0x47, 0x02, 0xd1, 0xcf, 0x8a, 0xcb, 0x32, 0x83, 0x62, 0x81, 0x45, 0x2f, 0xc1, 0x08, 0x3f, 0x90, - 0xae, 0x3a, 0x5e, 0xbd, 0x21, 0xbb, 0xf8, 0x94, 0xa0, 0x1e, 0xb9, 0xa5, 0xe1, 0xb0, 0x41, 0x79, - 0xc4, 0x49, 0x75, 0x31, 0xb1, 0x18, 0xf3, 0x0e, 0xbb, 0xcf, 0x5b, 0x30, 0xc1, 0xab, 0x59, 0x88, - 0xa2, 0xc0, 0xdd, 0x6c, 0x47, 0x24, 0x9c, 0xee, 0x67, 0x3b, 0xdd, 0x52, 0x56, 0x6f, 0xe5, 0xf6, - 0xc0, 0xdc, 0xad, 0x04, 0x17, 0xbe, 0x09, 0x4e, 0x8b, 0x7a, 0x27, 0x92, 0x68, 0x9c, 0xaa, 0x16, - 0x7d, 0xb7, 0x05, 0x33, 0x35, 0xdf, 0x8b, 0x02, 0xbf, 0xd1, 0x20, 0x41, 0xa5, 0xbd, 0xd9, 0x70, - 0xc3, 0x1d, 0x3e, 0x4f, 0x31, 0xd9, 0x62, 0x3b, 0x41, 0xce, 0x18, 0x2a, 0x22, 0x31, 0x86, 0xe7, - 0xef, 0x1d, 0xcc, 0xce, 0x2c, 0xe5, 0xb2, 0xc2, 0x1d, 0xaa, 0x41, 0xbb, 0x80, 0xe8, 0x51, 0x5a, - 0x8d, 0x9c, 0x6d, 0x12, 0x57, 0x3e, 0xd8, 0x7b, 0xe5, 0x67, 0xee, 0x1d, 0xcc, 0xa2, 0xf5, 0x14, - 0x0b, 0x9c, 0xc1, 0x16, 0xbd, 0x09, 0xa7, 0x28, 0x34, 0xf5, 0xad, 0x43, 0xbd, 0x57, 0x37, 0x7d, - 0xef, 0x60, 0xf6, 0xd4, 0x7a, 0x06, 0x13, 0x9c, 0xc9, 0x1a, 0x7d, 0x97, 0x05, 0x67, 0xe3, 0xcf, - 0x5f, 0xb9, 0xdb, 0x72, 0xbc, 0x7a, 0x5c, 0x71, 0xa9, 0xf7, 0x8a, 0xe9, 0x9e, 0x7c, 0x76, 0x29, - 0x8f, 0x13, 0xce, 0xaf, 0x04, 0x79, 0x30, 0x45, 0x9b, 0x96, 0xac, 0x1b, 0x7a, 0xaf, 0xfb, 0xa1, - 0x7b, 0x07, 0xb3, 0x53, 0xeb, 0x69, 0x1e, 0x38, 0x8b, 0xf1, 0xcc, 0x12, 0x9c, 0xce, 0x9c, 0x9d, - 0x68, 0x02, 0x8a, 0xbb, 0x84, 0x4b, 0x5d, 0x25, 0x4c, 0x7f, 0xa2, 0x53, 0xd0, 0xbf, 0xe7, 0x34, - 0xda, 0x62, 0x61, 0x62, 0xfe, 0xe7, 0xe5, 0xc2, 0x4b, 0x96, 0xfd, 0xcf, 0x8b, 0x30, 0xbe, 0x54, - 0x2d, 0xdf, 0xd7, 0xaa, 0xd7, 0x8f, 0xbd, 0x42, 0xc7, 0x63, 0x2f, 0x3e, 0x44, 0x8b, 0xb9, 0x87, - 0xe8, 0x9f, 0xcd, 0x58, 0xb2, 0x7d, 0x6c, 0xc9, 0x7e, 0x47, 0xce, 0x92, 0x3d, 0xe6, 0x85, 0xba, - 0x97, 0x33, 0x6b, 0xfb, 0xd9, 0x00, 0x66, 0x4a, 0x48, 0xd7, 0xfd, 0x9a, 0xd3, 0x48, 0x6e, 0xb5, - 0x47, 0x9c, 0xba, 0xc7, 0x33, 0x8e, 0x35, 0x18, 0x59, 0x72, 0x5a, 0xce, 0xa6, 0xdb, 0x70, 0x23, - 0x97, 0x84, 0xe8, 0x49, 0x28, 0x3a, 0xf5, 0x3a, 0x93, 0xee, 0x4a, 0x8b, 0xa7, 0xef, 0x1d, 0xcc, - 0x16, 0x17, 0xea, 0x54, 0xcc, 0x00, 0x45, 0xb5, 0x8f, 0x29, 0x05, 0x7a, 0x2f, 0xf4, 0xd5, 0x03, - 0xbf, 0x35, 0x5d, 0x60, 0x94, 0x74, 0x95, 0xf7, 0x2d, 0x07, 0x7e, 0x2b, 0x41, 0xca, 0x68, 0xec, - 0x5f, 0x2d, 0xc0, 0x23, 0x4b, 0xa4, 
0xb5, 0xb3, 0x5a, 0xcd, 0x39, 0x2f, 0x2e, 0xc1, 0x50, 0xd3, - 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0x6b, 0x02, 0x86, 0x15, 0x16, 0x5d, 0x80, - 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, 0x43, 0x29, 0xda, 0x21, 0x09, - 0xc4, 0x8c, 0x51, 0x14, 0x37, 0x43, 0x12, 0x60, 0x86, 0x89, 0x25, 0x01, 0x2a, 0x23, 0x88, 0x13, - 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, 0x62, 0x64, 0x7b, 0x5a, 0x9a, - 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, 0xae, 0xa2, 0xc2, 0xd7, 0x0a, - 0x80, 0x78, 0x17, 0x7e, 0x8b, 0x75, 0xdc, 0xcd, 0x74, 0xc7, 0xf5, 0xbe, 0x24, 0x8e, 0xab, 0xf7, - 0xfe, 0x87, 0x05, 0x8f, 0x2c, 0xb9, 0x5e, 0x9d, 0x04, 0x39, 0x13, 0xf0, 0xc1, 0xdc, 0x9d, 0x8f, - 0x26, 0xa4, 0x18, 0x53, 0xac, 0xef, 0x18, 0xa6, 0x98, 0xfd, 0x47, 0x16, 0x20, 0xfe, 0xd9, 0xef, - 0xb8, 0x8f, 0xbd, 0x99, 0xfe, 0xd8, 0x63, 0x98, 0x16, 0xf6, 0xdf, 0xb5, 0x60, 0x78, 0xa9, 0xe1, - 0xb8, 0x4d, 0xf1, 0xa9, 0x4b, 0x30, 0x29, 0x15, 0x45, 0x0c, 0xac, 0xc9, 0xfe, 0x74, 0x73, 0x9b, - 0xc4, 0x49, 0x24, 0x4e, 0xd3, 0xa3, 0x8f, 0xc3, 0x59, 0x03, 0xb8, 0x41, 0x9a, 0xad, 0x86, 0x13, - 0xe9, 0xb7, 0x02, 0x76, 0xfa, 0xe3, 0x3c, 0x22, 0x9c, 0x5f, 0xde, 0xbe, 0x0e, 0x63, 0x4b, 0x0d, - 0x97, 0x78, 0x51, 0xb9, 0xb2, 0xe4, 0x7b, 0x5b, 0xee, 0x36, 0x7a, 0x19, 0xc6, 0x22, 0xb7, 0x49, - 0xfc, 0x76, 0x54, 0x25, 0x35, 0xdf, 0x63, 0x77, 0x6d, 0xeb, 0x52, 0xff, 0x22, 0xba, 0x77, 0x30, - 0x3b, 0xb6, 0x61, 0x60, 0x70, 0x82, 0xd2, 0xfe, 0x1d, 0x3a, 0xe2, 0x7e, 0xb3, 0xe5, 0x7b, 0xc4, - 0x8b, 0x96, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0x2f, 0x43, 0x5f, 0x44, 0x47, 0x90, 0x7f, 0xf9, 0x45, - 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0x1e, 0xcc, 0x9e, 0x49, 0x97, 0x60, 0x23, 0xcb, 0xca, 0xa0, 0xef, - 0x80, 0x81, 0x30, 0x72, 0xa2, 0x76, 0x28, 0x3e, 0xf5, 0x51, 0x39, 0xfe, 0x55, 0x06, 0x3d, 0x3c, - 0x98, 0x1d, 0x57, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x29, 0x18, 0x6c, 0x92, 0x30, 0x74, 0xb6, - 0xe5, 0xf9, 0x3d, 0x2e, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x18, 0xf4, 0x93, 0x20, - 0xf0, 0x03, 0xb1, 0xab, 0x8c, 0x0a, 0xc2, 0xfe, 0x15, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x5b, 0x0b, - 0xc6, 0x55, 0x5b, 0x79, 0x5d, 0x27, 0x70, 0x6f, 0xfa, 0x18, 0x40, 0x4d, 0x7e, 0x60, 0xc8, 0xce, - 0xbb, 0xe1, 0xe7, 0x2e, 0x66, 0x8a, 0x16, 0xa9, 0x6e, 0x8c, 0x39, 0x2b, 0x50, 0x88, 0x35, 0x6e, - 0xf6, 0x3f, 0xb2, 0x60, 0x2a, 0xf1, 0x45, 0xd7, 0xdd, 0x30, 0x42, 0x6f, 0xa4, 0xbe, 0x6a, 0xae, - 0xb7, 0xaf, 0xa2, 0xa5, 0xd9, 0x37, 0xa9, 0xc5, 0x27, 0x21, 0xda, 0x17, 0x5d, 0x85, 0x7e, 0x37, - 0x22, 0x4d, 0xf9, 0x31, 0x8f, 0x75, 0xfc, 0x18, 0xde, 0xaa, 0x78, 0x44, 0xca, 0xb4, 0x24, 0xe6, - 0x0c, 0xec, 0x5f, 0x2d, 0x42, 0x89, 0x4f, 0xdb, 0x35, 0xa7, 0x75, 0x02, 0x63, 0xf1, 0x34, 0x94, - 0xdc, 0x66, 0xb3, 0x1d, 0x39, 0x9b, 0xe2, 0x00, 0x1a, 0xe2, 0x9b, 0x41, 0x59, 0x02, 0x71, 0x8c, - 0x47, 0x65, 0xe8, 0x63, 0x4d, 0xe1, 0x5f, 0xf9, 0x64, 0xf6, 0x57, 0x8a, 0xb6, 0xcf, 0x2d, 0x3b, - 0x91, 0xc3, 0x65, 0x3f, 0x75, 0xf2, 0x51, 0x10, 0x66, 0x2c, 0x90, 0x03, 0xb0, 0xe9, 0x7a, 0x4e, - 0xb0, 0x4f, 0x61, 0xd3, 0x45, 0xc6, 0xf0, 0xd9, 0xce, 0x0c, 0x17, 0x15, 0x3d, 0x67, 0xab, 0x3e, - 0x2c, 0x46, 0x60, 0x8d, 0xe9, 0xcc, 0x07, 0xa0, 0xa4, 0x88, 0x8f, 0x22, 0xc2, 0xcd, 0x7c, 0x08, - 0xc6, 0x13, 0x75, 0x75, 0x2b, 0x3e, 0xa2, 0x4b, 0x80, 0xbf, 0xc8, 0xb6, 0x0c, 0xd1, 0xea, 0x15, - 0x6f, 0x4f, 0xec, 0x9c, 0x6f, 0xc1, 0xa9, 0x46, 0xc6, 0xde, 0x2b, 0xc6, 0xb5, 0xf7, 0xbd, 0xfa, - 0x11, 0xf1, 0xd9, 0xa7, 0xb2, 0xb0, 0x38, 0xb3, 0x0e, 0x2a, 0xd5, 0xf8, 0x2d, 0xba, 0x40, 0x9c, - 0x86, 0x7e, 0x41, 0xb8, 0x21, 0x60, 0x58, 0x61, 0xe9, 0x7e, 
0x77, 0x4a, 0x35, 0xfe, 0x1a, 0xd9, - 0xaf, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0xf0, 0x4d, 0x6d, 0xfe, 0x39, 0xde, 0xfb, 0x7c, 0xbb, 0x1c, - 0x16, 0x0c, 0x8a, 0xd7, 0xc8, 0x3e, 0x1f, 0x0a, 0xfd, 0xeb, 0x8a, 0x1d, 0xbf, 0xee, 0x2b, 0x16, - 0x8c, 0xaa, 0xaf, 0x3b, 0x81, 0x7d, 0x61, 0xd1, 0xdc, 0x17, 0xce, 0x75, 0x9c, 0xe0, 0x39, 0x3b, - 0xc2, 0xd7, 0x0a, 0x70, 0x56, 0xd1, 0xd0, 0xdb, 0x0c, 0xff, 0x23, 0x66, 0xd5, 0x3c, 0x94, 0x3c, - 0xa5, 0xd7, 0xb3, 0x4c, 0x85, 0x5a, 0xac, 0xd5, 0x8b, 0x69, 0xa8, 0x50, 0xea, 0xc5, 0xc7, 0xec, - 0x88, 0xae, 0xf0, 0x16, 0xca, 0xed, 0x45, 0x28, 0xb6, 0xdd, 0xba, 0x38, 0x60, 0xde, 0x27, 0x7b, - 0xfb, 0x66, 0x79, 0xf9, 0xf0, 0x60, 0xf6, 0xd1, 0x3c, 0x63, 0x0b, 0x3d, 0xd9, 0xc2, 0xb9, 0x9b, - 0xe5, 0x65, 0x4c, 0x0b, 0xa3, 0x05, 0x18, 0x97, 0x27, 0xf4, 0x2d, 0x2a, 0x20, 0xfa, 0x9e, 0x38, - 0x87, 0x94, 0xd6, 0x1a, 0x9b, 0x68, 0x9c, 0xa4, 0x47, 0xcb, 0x30, 0xb1, 0xdb, 0xde, 0x24, 0x0d, - 0x12, 0xf1, 0x0f, 0xbe, 0x46, 0xb8, 0x4e, 0xb7, 0x14, 0xdf, 0x25, 0xaf, 0x25, 0xf0, 0x38, 0x55, - 0xc2, 0xfe, 0x53, 0x76, 0x1e, 0x88, 0xde, 0xab, 0x04, 0x3e, 0x9d, 0x58, 0x94, 0xfb, 0x37, 0x73, - 0x3a, 0xf7, 0x32, 0x2b, 0xae, 0x91, 0xfd, 0x0d, 0x9f, 0xde, 0x25, 0xb2, 0x67, 0x85, 0x31, 0xe7, - 0xfb, 0x3a, 0xce, 0xf9, 0x9f, 0x2d, 0xc0, 0x69, 0xd5, 0x03, 0x86, 0xd8, 0xfa, 0xad, 0xde, 0x07, - 0x97, 0x61, 0xb8, 0x4e, 0xb6, 0x9c, 0x76, 0x23, 0x52, 0x06, 0x86, 0x7e, 0x6e, 0x64, 0x5a, 0x8e, - 0xc1, 0x58, 0xa7, 0x39, 0x42, 0xb7, 0xfd, 0xcf, 0x61, 0x76, 0x10, 0x47, 0x0e, 0x9d, 0xe3, 0x6a, - 0xd5, 0x58, 0xb9, 0xab, 0xe6, 0x31, 0xe8, 0x77, 0x9b, 0x54, 0x30, 0x2b, 0x98, 0xf2, 0x56, 0x99, - 0x02, 0x31, 0xc7, 0xa1, 0x27, 0x60, 0xb0, 0xe6, 0x37, 0x9b, 0x8e, 0x57, 0x67, 0x47, 0x5e, 0x69, - 0x71, 0x98, 0xca, 0x6e, 0x4b, 0x1c, 0x84, 0x25, 0x0e, 0x3d, 0x02, 0x7d, 0x4e, 0xb0, 0xcd, 0xb5, - 0x2e, 0xa5, 0xc5, 0x21, 0x5a, 0xd3, 0x42, 0xb0, 0x1d, 0x62, 0x06, 0xa5, 0x97, 0xc6, 0x3b, 0x7e, - 0xb0, 0xeb, 0x7a, 0xdb, 0xcb, 0x6e, 0x20, 0x96, 0x84, 0x3a, 0x0b, 0x6f, 0x2b, 0x0c, 0xd6, 0xa8, - 0xd0, 0x2a, 0xf4, 0xb7, 0xfc, 0x20, 0x0a, 0xa7, 0x07, 0x58, 0x77, 0x3f, 0x9a, 0xb3, 0x11, 0xf1, - 0xaf, 0xad, 0xf8, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1c, 0x5d, 0x87, 0x41, 0xe2, - 0xed, 0xad, 0x06, 0x7e, 0x73, 0x7a, 0x2a, 0x9f, 0xd3, 0x0a, 0x27, 0xe1, 0xd3, 0x2c, 0x96, 0x51, - 0x05, 0x18, 0x4b, 0x16, 0xe8, 0x3b, 0xa0, 0x48, 0xbc, 0xbd, 0xe9, 0x41, 0xc6, 0x69, 0x26, 0x87, - 0xd3, 0x2d, 0x27, 0x88, 0xf7, 0xfc, 0x15, 0x6f, 0x0f, 0xd3, 0x32, 0xe8, 0xa3, 0x50, 0x92, 0x1b, - 0x46, 0x28, 0xd4, 0x99, 0x99, 0x13, 0x56, 0x6e, 0x33, 0x98, 0xbc, 0xd9, 0x76, 0x03, 0xd2, 0x24, - 0x5e, 0x14, 0xc6, 0x3b, 0xa4, 0xc4, 0x86, 0x38, 0xe6, 0x86, 0x3e, 0x2a, 0x75, 0xe8, 0x6b, 0x7e, - 0xdb, 0x8b, 0xc2, 0xe9, 0x12, 0x6b, 0x5e, 0xa6, 0x75, 0xf3, 0x56, 0x4c, 0x97, 0x54, 0xb2, 0xf3, - 0xc2, 0xd8, 0x60, 0x85, 0x3e, 0x01, 0xa3, 0xfc, 0x3f, 0xb7, 0x11, 0x86, 0xd3, 0xa7, 0x19, 0xef, - 0x0b, 0xf9, 0xbc, 0x39, 0xe1, 0xe2, 0x69, 0xc1, 0x7c, 0x54, 0x87, 0x86, 0xd8, 0xe4, 0x86, 0x30, - 0x8c, 0x36, 0xdc, 0x3d, 0xe2, 0x91, 0x30, 0xac, 0x04, 0xfe, 0x26, 0x11, 0x2a, 0xcf, 0xb3, 0xd9, - 0x36, 0x45, 0x7f, 0x93, 0x2c, 0x4e, 0x52, 0x9e, 0xd7, 0xf5, 0x32, 0xd8, 0x64, 0x81, 0x6e, 0xc2, - 0x18, 0xbd, 0x63, 0xba, 0x31, 0xd3, 0xe1, 0x6e, 0x4c, 0xd9, 0xbd, 0x0a, 0x1b, 0x85, 0x70, 0x82, - 0x09, 0xba, 0x01, 0x23, 0x61, 0xe4, 0x04, 0x51, 0xbb, 0xc5, 0x99, 0x9e, 0xe9, 0xc6, 0x94, 0x99, - 0xa4, 0xab, 0x5a, 0x11, 0x6c, 0x30, 0x40, 0xaf, 0x41, 0xa9, 0xe1, 0x6e, 0x91, 0xda, 0x7e, 0xad, - 0x41, 0xa6, 0x47, 0x18, 0xb7, 0xcc, 0x4d, 0xe5, 0xba, 0x24, 0xe2, 0x72, 0xae, 0xfa, 
0x8b, 0xe3, - 0xe2, 0xe8, 0x16, 0x9c, 0x89, 0x48, 0xd0, 0x74, 0x3d, 0x87, 0x6e, 0x06, 0xe2, 0x6a, 0xc5, 0x4c, - 0xbd, 0xa3, 0x6c, 0xb5, 0x9d, 0x17, 0xa3, 0x71, 0x66, 0x23, 0x93, 0x0a, 0xe7, 0x94, 0x46, 0x77, - 0x61, 0x3a, 0x03, 0xe3, 0x37, 0xdc, 0xda, 0xfe, 0xf4, 0x29, 0xc6, 0xf9, 0x83, 0x82, 0xf3, 0xf4, - 0x46, 0x0e, 0xdd, 0x61, 0x07, 0x1c, 0xce, 0xe5, 0x8e, 0x6e, 0xc0, 0x38, 0xdb, 0x81, 0x2a, 0xed, - 0x46, 0x43, 0x54, 0x38, 0xc6, 0x2a, 0x7c, 0x42, 0x9e, 0xc7, 0x65, 0x13, 0x7d, 0x78, 0x30, 0x0b, - 0xf1, 0x3f, 0x9c, 0x2c, 0x8d, 0x36, 0x99, 0x55, 0xb1, 0x1d, 0xb8, 0xd1, 0x3e, 0xdd, 0x37, 0xc8, - 0xdd, 0x68, 0x7a, 0xbc, 0xa3, 0x86, 0x45, 0x27, 0x55, 0xa6, 0x47, 0x1d, 0x88, 0x93, 0x0c, 0xe9, - 0x96, 0x1a, 0x46, 0x75, 0xd7, 0x9b, 0x9e, 0xe0, 0xf7, 0x12, 0xb9, 0x23, 0x55, 0x29, 0x10, 0x73, - 0x1c, 0xb3, 0x28, 0xd2, 0x1f, 0x37, 0xe8, 0xc9, 0x35, 0xc9, 0x08, 0x63, 0x8b, 0xa2, 0x44, 0xe0, - 0x98, 0x86, 0x0a, 0x93, 0x51, 0xb4, 0x3f, 0x8d, 0x18, 0xa9, 0xda, 0x58, 0x36, 0x36, 0x3e, 0x8a, - 0x29, 0xdc, 0xde, 0x84, 0x31, 0xb5, 0x11, 0xb2, 0x3e, 0x41, 0xb3, 0xd0, 0xcf, 0xc4, 0x27, 0xa1, - 0x0f, 0x2c, 0xd1, 0x26, 0x30, 0xd1, 0x0a, 0x73, 0x38, 0x6b, 0x82, 0xfb, 0x16, 0x59, 0xdc, 0x8f, - 0x08, 0xbf, 0xd3, 0x17, 0xb5, 0x26, 0x48, 0x04, 0x8e, 0x69, 0xec, 0xff, 0xc7, 0xc5, 0xd0, 0x78, - 0xb7, 0xed, 0xe1, 0x7c, 0x79, 0x06, 0x86, 0x76, 0xfc, 0x30, 0xa2, 0xd4, 0xac, 0x8e, 0xfe, 0x58, - 0xf0, 0xbc, 0x2a, 0xe0, 0x58, 0x51, 0xa0, 0x57, 0x60, 0xb4, 0xa6, 0x57, 0x20, 0x0e, 0x47, 0xb5, - 0x8d, 0x18, 0xb5, 0x63, 0x93, 0x16, 0xbd, 0x04, 0x43, 0xcc, 0x4b, 0xa6, 0xe6, 0x37, 0x84, 0xd4, - 0x26, 0x4f, 0xf8, 0xa1, 0x8a, 0x80, 0x1f, 0x6a, 0xbf, 0xb1, 0xa2, 0x46, 0x17, 0x61, 0x80, 0x36, - 0xa1, 0x5c, 0x11, 0xc7, 0x92, 0x52, 0x6d, 0x5d, 0x65, 0x50, 0x2c, 0xb0, 0xf6, 0x5f, 0x2c, 0x68, - 0xbd, 0x4c, 0xef, 0xc3, 0x04, 0x55, 0x60, 0xf0, 0x8e, 0xe3, 0x46, 0xae, 0xb7, 0x2d, 0xe4, 0x8f, - 0xa7, 0x3a, 0x9e, 0x51, 0xac, 0xd0, 0x6d, 0x5e, 0x80, 0x9f, 0xa2, 0xe2, 0x0f, 0x96, 0x6c, 0x28, - 0xc7, 0xa0, 0xed, 0x79, 0x94, 0x63, 0xa1, 0x57, 0x8e, 0x98, 0x17, 0xe0, 0x1c, 0xc5, 0x1f, 0x2c, - 0xd9, 0xa0, 0x37, 0x00, 0xe4, 0x0a, 0x23, 0x75, 0xe1, 0x9d, 0xf2, 0x4c, 0x77, 0xa6, 0x1b, 0xaa, - 0xcc, 0xe2, 0x18, 0x3d, 0xa3, 0xe3, 0xff, 0x58, 0xe3, 0x67, 0x47, 0x4c, 0x4e, 0x4b, 0x37, 0x06, - 0x7d, 0x9c, 0x4e, 0x71, 0x27, 0x88, 0x48, 0x7d, 0x21, 0x12, 0x9d, 0xf3, 0xde, 0xde, 0x2e, 0x29, - 0x1b, 0x6e, 0x93, 0xe8, 0xcb, 0x41, 0x30, 0xc1, 0x31, 0x3f, 0xfb, 0xe7, 0x8b, 0x30, 0x9d, 0xd7, - 0x5c, 0x3a, 0xe9, 0xc8, 0x5d, 0x37, 0x5a, 0xa2, 0xe2, 0x95, 0x65, 0x4e, 0xba, 0x15, 0x01, 0xc7, - 0x8a, 0x82, 0x8e, 0x7e, 0xe8, 0x6e, 0xcb, 0x3b, 0x66, 0x7f, 0x3c, 0xfa, 0x55, 0x06, 0xc5, 0x02, - 0x4b, 0xe9, 0x02, 0xe2, 0x84, 0xc2, 0xfd, 0x49, 0x9b, 0x25, 0x98, 0x41, 0xb1, 0xc0, 0xea, 0xda, - 0xae, 0xbe, 0x2e, 0xda, 0x2e, 0xa3, 0x8b, 0xfa, 0x8f, 0xb7, 0x8b, 0xd0, 0x27, 0x01, 0xb6, 0x5c, - 0xcf, 0x0d, 0x77, 0x18, 0xf7, 0x81, 0x23, 0x73, 0x57, 0xc2, 0xd9, 0xaa, 0xe2, 0x82, 0x35, 0x8e, - 0xe8, 0x45, 0x18, 0x56, 0x0b, 0xb0, 0xbc, 0xcc, 0x6c, 0xc1, 0x9a, 0x6f, 0x4d, 0xbc, 0x1b, 0x2d, - 0x63, 0x9d, 0xce, 0xfe, 0x74, 0x72, 0xbe, 0x88, 0x15, 0xa0, 0xf5, 0xaf, 0xd5, 0x6b, 0xff, 0x16, - 0x3a, 0xf7, 0xaf, 0xfd, 0x8d, 0x22, 0x8c, 0x1b, 0x95, 0xb5, 0xc3, 0x1e, 0xf6, 0xac, 0x2b, 0x74, - 0x03, 0x77, 0x22, 0x22, 0xd6, 0x9f, 0xdd, 0x7d, 0xa9, 0xe8, 0x9b, 0x3c, 0x5d, 0x01, 0xbc, 0x3c, - 0xfa, 0x24, 0x94, 0x1a, 0x4e, 0xc8, 0x34, 0x67, 0x44, 0xac, 0xbb, 0x5e, 0x98, 0xc5, 0x17, 0x13, - 0x27, 0x8c, 0xb4, 0x53, 0x93, 0xf3, 0x8e, 0x59, 0xd2, 0x93, 0x86, 0xca, 0x27, 0xd2, 0xbf, 0x4e, - 0x35, 0x82, 
0x0a, 0x31, 0xfb, 0x98, 0xe3, 0xd0, 0x4b, 0x30, 0x12, 0x10, 0x36, 0x2b, 0x96, 0xa8, - 0x34, 0xc7, 0xa6, 0x59, 0x7f, 0x2c, 0xf6, 0x61, 0x0d, 0x87, 0x0d, 0xca, 0xf8, 0x6e, 0x30, 0xd0, - 0xe1, 0x6e, 0xf0, 0x14, 0x0c, 0xb2, 0x1f, 0x6a, 0x06, 0xa8, 0xd1, 0x28, 0x73, 0x30, 0x96, 0xf8, - 0xe4, 0x84, 0x19, 0xea, 0x6d, 0xc2, 0xd0, 0xdb, 0x87, 0x98, 0xd4, 0xcc, 0x0e, 0x3f, 0xc4, 0x77, - 0x39, 0x31, 0xe5, 0xb1, 0xc4, 0xd9, 0xef, 0x85, 0xb1, 0x65, 0x87, 0x34, 0x7d, 0x6f, 0xc5, 0xab, - 0xb7, 0x7c, 0xd7, 0x8b, 0xd0, 0x34, 0xf4, 0xb1, 0x43, 0x84, 0x6f, 0x01, 0x7d, 0xb4, 0x22, 0xcc, - 0x20, 0xf6, 0x36, 0x9c, 0x5e, 0xf6, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0xbe, 0x50, 0x29, 0x6b, 0xf7, - 0xeb, 0x75, 0x79, 0xbf, 0xe3, 0x6e, 0x6d, 0x99, 0x5b, 0xaf, 0x56, 0x92, 0x8b, 0xb5, 0xab, 0x6e, - 0x83, 0xe4, 0x68, 0x41, 0xfe, 0x4a, 0xc1, 0xa8, 0x29, 0xa6, 0x57, 0x76, 0x38, 0x2b, 0xd7, 0x0e, - 0xf7, 0x3a, 0x0c, 0x6d, 0xb9, 0xa4, 0x51, 0xc7, 0x64, 0x4b, 0xcc, 0xc4, 0x27, 0xf3, 0x3d, 0x75, - 0x56, 0x29, 0xa5, 0xd4, 0x7a, 0xf1, 0xdb, 0xe1, 0xaa, 0x28, 0x8c, 0x15, 0x1b, 0xb4, 0x0b, 0x13, - 0xf2, 0xc2, 0x20, 0xb1, 0x62, 0x5e, 0x3e, 0xd5, 0xe9, 0x16, 0x62, 0x32, 0x3f, 0x75, 0xef, 0x60, - 0x76, 0x02, 0x27, 0xd8, 0xe0, 0x14, 0x63, 0x7a, 0x1d, 0x6c, 0xd2, 0x1d, 0xb8, 0x8f, 0x75, 0x3f, - 0xbb, 0x0e, 0xb2, 0x9b, 0x2d, 0x83, 0xda, 0x3f, 0x66, 0xc1, 0x43, 0xa9, 0x9e, 0x11, 0x37, 0xfc, - 0x63, 0x1e, 0x85, 0xe4, 0x8d, 0xbb, 0xd0, 0xfd, 0xc6, 0x6d, 0xff, 0x2d, 0x0b, 0x4e, 0xad, 0x34, - 0x5b, 0xd1, 0xfe, 0xb2, 0x6b, 0x1a, 0xcd, 0x3e, 0x00, 0x03, 0x4d, 0x52, 0x77, 0xdb, 0x4d, 0x31, - 0x72, 0xb3, 0x72, 0x97, 0x5a, 0x63, 0xd0, 0xc3, 0x83, 0xd9, 0xd1, 0x6a, 0xe4, 0x07, 0xce, 0x36, - 0xe1, 0x00, 0x2c, 0xc8, 0xd9, 0x5e, 0xef, 0xbe, 0x45, 0xae, 0xbb, 0x4d, 0x57, 0x7a, 0x5e, 0x75, - 0xd4, 0xd9, 0xcd, 0xc9, 0x0e, 0x9d, 0x7b, 0xbd, 0xed, 0x78, 0x91, 0x1b, 0xed, 0x0b, 0x7b, 0x97, - 0x64, 0x82, 0x63, 0x7e, 0xf6, 0xd7, 0x2d, 0x18, 0x97, 0xf3, 0x7e, 0xa1, 0x5e, 0x0f, 0x48, 0x18, - 0xa2, 0x19, 0x28, 0xb8, 0x2d, 0xd1, 0x4a, 0x10, 0xad, 0x2c, 0x94, 0x2b, 0xb8, 0xe0, 0xb6, 0xa4, - 0x58, 0xc6, 0x36, 0xc2, 0xa2, 0x69, 0xfa, 0xbb, 0x2a, 0xe0, 0x58, 0x51, 0xa0, 0x4b, 0x30, 0xe4, - 0xf9, 0x75, 0x6e, 0xe7, 0xe2, 0x47, 0x1a, 0x9b, 0x60, 0xeb, 0x02, 0x86, 0x15, 0x16, 0x55, 0xa0, - 0xc4, 0x1d, 0xc3, 0xe2, 0x49, 0xdb, 0x93, 0x7b, 0x19, 0xfb, 0xb2, 0x0d, 0x59, 0x12, 0xc7, 0x4c, - 0xec, 0x5f, 0xb1, 0x60, 0x44, 0x7e, 0x59, 0x8f, 0x32, 0x27, 0x5d, 0x5a, 0xb1, 0xbc, 0x19, 0x2f, - 0x2d, 0x2a, 0x33, 0x32, 0x8c, 0x21, 0x2a, 0x16, 0x8f, 0x24, 0x2a, 0x5e, 0x86, 0x61, 0xa7, 0xd5, - 0xaa, 0x98, 0x72, 0x26, 0x9b, 0x4a, 0x0b, 0x31, 0x18, 0xeb, 0x34, 0xf6, 0x8f, 0x16, 0x60, 0x4c, - 0x7e, 0x41, 0xb5, 0xbd, 0x19, 0x92, 0x08, 0x6d, 0x40, 0xc9, 0xe1, 0xa3, 0x44, 0xe4, 0x24, 0x7f, - 0x2c, 0x5b, 0x8f, 0x60, 0x0c, 0x69, 0x7c, 0xe0, 0x2f, 0xc8, 0xd2, 0x38, 0x66, 0x84, 0x1a, 0x30, - 0xe9, 0xf9, 0x11, 0xdb, 0xfc, 0x15, 0xbe, 0x93, 0x69, 0x27, 0xc9, 0xfd, 0xac, 0xe0, 0x3e, 0xb9, - 0x9e, 0xe4, 0x82, 0xd3, 0x8c, 0xd1, 0x8a, 0xd4, 0xcd, 0x14, 0xf3, 0x95, 0x01, 0xfa, 0xc0, 0x65, - 0xab, 0x66, 0xec, 0x5f, 0xb2, 0xa0, 0x24, 0xc9, 0x4e, 0xc2, 0x8a, 0xb7, 0x06, 0x83, 0x21, 0x1b, - 0x04, 0xd9, 0x35, 0x76, 0xa7, 0x86, 0xf3, 0xf1, 0x8a, 0xcf, 0x34, 0xfe, 0x3f, 0xc4, 0x92, 0x07, - 0x53, 0xcd, 0xab, 0xe6, 0xbf, 0x43, 0x54, 0xf3, 0xaa, 0x3d, 0x39, 0x87, 0xd2, 0x1f, 0xb0, 0x36, - 0x6b, 0xba, 0x2e, 0x2a, 0x7a, 0xb5, 0x02, 0xb2, 0xe5, 0xde, 0x4d, 0x8a, 0x5e, 0x15, 0x06, 0xc5, - 0x02, 0x8b, 0xde, 0x80, 0x91, 0x9a, 0xd4, 0xc9, 0xc6, 0x2b, 0xfc, 0x62, 0x47, 0xfb, 0x80, 0x32, - 0x25, 0x71, 0x5d, 0xc8, 0x92, 0x56, 
0x1e, 0x1b, 0xdc, 0x4c, 0xc7, 0x87, 0x62, 0x37, 0xc7, 0x87, - 0x98, 0x6f, 0xbe, 0x1b, 0xc0, 0x8f, 0x5b, 0x30, 0xc0, 0x75, 0x71, 0xbd, 0xa9, 0x42, 0x35, 0xcb, - 0x5a, 0xdc, 0x77, 0xb7, 0x28, 0x50, 0x58, 0xca, 0xd0, 0x1a, 0x94, 0xd8, 0x0f, 0xa6, 0x4b, 0x2c, - 0xe6, 0xbf, 0x4b, 0xe0, 0xb5, 0xea, 0x0d, 0xbc, 0x25, 0x8b, 0xe1, 0x98, 0x83, 0xfd, 0x23, 0x45, - 0xba, 0xbb, 0xc5, 0xa4, 0xc6, 0xa1, 0x6f, 0x3d, 0xb8, 0x43, 0xbf, 0xf0, 0xa0, 0x0e, 0xfd, 0x6d, - 0x18, 0xaf, 0x69, 0x76, 0xb8, 0x78, 0x24, 0x2f, 0x75, 0x9c, 0x24, 0x9a, 0xc9, 0x8e, 0x6b, 0x59, - 0x96, 0x4c, 0x26, 0x38, 0xc9, 0x15, 0x7d, 0x1c, 0x46, 0xf8, 0x38, 0x8b, 0x5a, 0xb8, 0xef, 0xc8, - 0x13, 0xf9, 0xf3, 0x45, 0xaf, 0x82, 0x6b, 0xe5, 0xb4, 0xe2, 0xd8, 0x60, 0x66, 0xff, 0xb1, 0x05, - 0x68, 0xa5, 0xb5, 0x43, 0x9a, 0x24, 0x70, 0x1a, 0xb1, 0x3a, 0xfd, 0x07, 0x2d, 0x98, 0x26, 0x29, - 0xf0, 0x92, 0xdf, 0x6c, 0x8a, 0x4b, 0x4b, 0xce, 0xbd, 0x7a, 0x25, 0xa7, 0x8c, 0x7a, 0xb8, 0x31, - 0x9d, 0x47, 0x81, 0x73, 0xeb, 0x43, 0x6b, 0x30, 0xc5, 0x4f, 0x49, 0x85, 0xd0, 0xfc, 0x50, 0x1e, - 0x16, 0x8c, 0xa7, 0x36, 0xd2, 0x24, 0x38, 0xab, 0x9c, 0xfd, 0x3d, 0x23, 0x90, 0xdb, 0x8a, 0x77, - 0xed, 0x08, 0xef, 0xda, 0x11, 0xde, 0xb5, 0x23, 0xbc, 0x6b, 0x47, 0x78, 0xd7, 0x8e, 0xf0, 0x6d, - 0x6f, 0x47, 0xf8, 0x4b, 0x16, 0x9c, 0x56, 0xc7, 0x80, 0x71, 0xf1, 0xfd, 0x0c, 0x4c, 0xf1, 0xe5, - 0x66, 0xf8, 0x2e, 0x8a, 0x63, 0xef, 0x72, 0xe6, 0xcc, 0x4d, 0xf8, 0xd8, 0x1a, 0x05, 0xf9, 0x63, - 0x85, 0x0c, 0x04, 0xce, 0xaa, 0xc6, 0xfe, 0xf9, 0x21, 0xe8, 0x5f, 0xd9, 0x23, 0x5e, 0x74, 0x02, - 0x57, 0x84, 0x1a, 0x8c, 0xb9, 0xde, 0x9e, 0xdf, 0xd8, 0x23, 0x75, 0x8e, 0x3f, 0xca, 0x4d, 0xf6, - 0x8c, 0x60, 0x3d, 0x56, 0x36, 0x58, 0xe0, 0x04, 0xcb, 0x07, 0xa1, 0x4d, 0xbe, 0x02, 0x03, 0x7c, - 0x13, 0x17, 0xaa, 0xe4, 0xcc, 0x3d, 0x9b, 0x75, 0xa2, 0x38, 0x9a, 0x62, 0x4d, 0x37, 0x3f, 0x24, - 0x44, 0x71, 0xf4, 0x69, 0x18, 0xdb, 0x72, 0x83, 0x30, 0xda, 0x70, 0x9b, 0x24, 0x8c, 0x9c, 0x66, - 0xeb, 0x3e, 0xb4, 0xc7, 0xaa, 0x1f, 0x56, 0x0d, 0x4e, 0x38, 0xc1, 0x19, 0x6d, 0xc3, 0x68, 0xc3, - 0xd1, 0xab, 0x1a, 0x3c, 0x72, 0x55, 0xea, 0x74, 0xb8, 0xae, 0x33, 0xc2, 0x26, 0x5f, 0xba, 0x9c, - 0x6a, 0x4c, 0x01, 0x3a, 0xc4, 0xd4, 0x02, 0x6a, 0x39, 0x71, 0xcd, 0x27, 0xc7, 0x51, 0x41, 0x87, - 0x39, 0xc8, 0x96, 0x4c, 0x41, 0x47, 0x73, 0x83, 0xfd, 0x14, 0x94, 0x08, 0xed, 0x42, 0xca, 0x58, - 0x1c, 0x30, 0xf3, 0xbd, 0xb5, 0x75, 0xcd, 0xad, 0x05, 0xbe, 0xa9, 0xb7, 0x5f, 0x91, 0x9c, 0x70, - 0xcc, 0x14, 0x2d, 0xc1, 0x40, 0x48, 0x02, 0x97, 0x84, 0xe2, 0xa8, 0xe9, 0x30, 0x8c, 0x8c, 0x8c, - 0xbf, 0x86, 0xe1, 0xbf, 0xb1, 0x28, 0x4a, 0xa7, 0x97, 0xc3, 0x54, 0x9a, 0xec, 0x30, 0xd0, 0xa6, - 0xd7, 0x02, 0x83, 0x62, 0x81, 0x45, 0xaf, 0xc1, 0x60, 0x40, 0x1a, 0xcc, 0x30, 0x34, 0xda, 0xfb, - 0x24, 0xe7, 0x76, 0x26, 0x5e, 0x0e, 0x4b, 0x06, 0xe8, 0x1a, 0xa0, 0x80, 0x50, 0x41, 0xc9, 0xf5, - 0xb6, 0x95, 0xdb, 0xa8, 0xd8, 0x68, 0x95, 0x40, 0x8a, 0x63, 0x0a, 0xf9, 0x10, 0x0a, 0x67, 0x14, - 0x43, 0x57, 0x60, 0x52, 0x41, 0xcb, 0x5e, 0x18, 0x39, 0x74, 0x83, 0x1b, 0x67, 0xbc, 0x94, 0x9e, - 0x02, 0x27, 0x09, 0x70, 0xba, 0x8c, 0xfd, 0x25, 0x0b, 0x78, 0x3f, 0x9f, 0xc0, 0xed, 0xfc, 0x55, - 0xf3, 0x76, 0x7e, 0x36, 0x77, 0xe4, 0x72, 0x6e, 0xe6, 0x5f, 0xb2, 0x60, 0x58, 0x1b, 0xd9, 0x78, - 0xce, 0x5a, 0x1d, 0xe6, 0x6c, 0x1b, 0x26, 0xe8, 0x4c, 0xbf, 0xb1, 0x19, 0x92, 0x60, 0x8f, 0xd4, - 0xd9, 0xc4, 0x2c, 0xdc, 0xdf, 0xc4, 0x54, 0x2e, 0x6a, 0xd7, 0x13, 0x0c, 0x71, 0xaa, 0x0a, 0xfb, - 0x53, 0xb2, 0xa9, 0xca, 0xa3, 0xaf, 0xa6, 0xc6, 0x3c, 0xe1, 0xd1, 0xa7, 0x46, 0x15, 0xc7, 0x34, - 0x74, 0xa9, 0xed, 0xf8, 0x61, 0x94, 0xf4, 0xe8, 0xbb, 0xea, 
0x87, 0x11, 0x66, 0x18, 0xfb, 0x79, - 0x80, 0x95, 0xbb, 0xa4, 0xc6, 0x67, 0xac, 0x7e, 0x79, 0xb0, 0xf2, 0x2f, 0x0f, 0xf6, 0x6f, 0x5a, - 0x30, 0xb6, 0xba, 0x64, 0x9c, 0x5c, 0x73, 0x00, 0xfc, 0xc6, 0x73, 0xfb, 0xf6, 0xba, 0x34, 0x87, - 0x73, 0x8b, 0xa6, 0x82, 0x62, 0x8d, 0x02, 0x9d, 0x85, 0x62, 0xa3, 0xed, 0x09, 0xf5, 0xe1, 0x20, - 0x3d, 0x1e, 0xaf, 0xb7, 0x3d, 0x4c, 0x61, 0xda, 0x23, 0x88, 0x62, 0xcf, 0x8f, 0x20, 0xba, 0x06, - 0x3f, 0x40, 0xb3, 0xd0, 0x7f, 0xe7, 0x8e, 0x5b, 0xe7, 0x4f, 0x4c, 0x85, 0xa9, 0xfe, 0xf6, 0xed, - 0xf2, 0x72, 0x88, 0x39, 0xdc, 0xfe, 0x42, 0x11, 0x66, 0x56, 0x1b, 0xe4, 0xee, 0xdb, 0x7c, 0x66, - 0xdb, 0xeb, 0x13, 0x8e, 0xa3, 0x29, 0x62, 0x8e, 0xfa, 0x4c, 0xa7, 0x7b, 0x7f, 0x6c, 0xc1, 0x20, - 0x77, 0x68, 0x93, 0x8f, 0x6e, 0x5f, 0xc9, 0xaa, 0x3d, 0xbf, 0x43, 0xe6, 0xb8, 0x63, 0x9c, 0x78, - 0xc3, 0xa7, 0x0e, 0x4c, 0x01, 0xc5, 0x92, 0xf9, 0xcc, 0xcb, 0x30, 0xa2, 0x53, 0x1e, 0xe9, 0xc1, - 0xdc, 0x9f, 0x2b, 0xc2, 0x04, 0x6d, 0xc1, 0x03, 0x1d, 0x88, 0x9b, 0xe9, 0x81, 0x38, 0xee, 0x47, - 0x53, 0xdd, 0x47, 0xe3, 0x8d, 0xe4, 0x68, 0x5c, 0xce, 0x1b, 0x8d, 0x93, 0x1e, 0x83, 0xef, 0xb6, - 0x60, 0x6a, 0xb5, 0xe1, 0xd7, 0x76, 0x13, 0x0f, 0x9b, 0x5e, 0x84, 0x61, 0xba, 0x1d, 0x87, 0xc6, - 0x1b, 0x7f, 0x23, 0xea, 0x83, 0x40, 0x61, 0x9d, 0x4e, 0x2b, 0x76, 0xf3, 0x66, 0x79, 0x39, 0x2b, - 0x58, 0x84, 0x40, 0x61, 0x9d, 0xce, 0xfe, 0x75, 0x0b, 0xce, 0x5d, 0x59, 0x5a, 0x89, 0xa7, 0x62, - 0x2a, 0x5e, 0xc5, 0x45, 0x18, 0x68, 0xd5, 0xb5, 0xa6, 0xc4, 0xea, 0xd5, 0x65, 0xd6, 0x0a, 0x81, - 0x7d, 0xa7, 0xc4, 0x62, 0xb9, 0x09, 0x70, 0x05, 0x57, 0x96, 0xc4, 0xbe, 0x2b, 0xad, 0x29, 0x56, - 0xae, 0x35, 0xe5, 0x09, 0x18, 0xa4, 0xe7, 0x82, 0x5b, 0x93, 0xed, 0xe6, 0x06, 0x5a, 0x0e, 0xc2, - 0x12, 0x67, 0xff, 0x8c, 0x05, 0x53, 0x57, 0xdc, 0x88, 0x1e, 0xda, 0xc9, 0x80, 0x0c, 0xf4, 0xd4, - 0x0e, 0xdd, 0xc8, 0x0f, 0xf6, 0x93, 0x01, 0x19, 0xb0, 0xc2, 0x60, 0x8d, 0x8a, 0x7f, 0xd0, 0x9e, - 0xcb, 0x3c, 0xb4, 0x0b, 0xa6, 0xfd, 0x0a, 0x0b, 0x38, 0x56, 0x14, 0xb4, 0xbf, 0xea, 0x6e, 0xc0, - 0x54, 0x7f, 0xfb, 0x62, 0xe3, 0x56, 0xfd, 0xb5, 0x2c, 0x11, 0x38, 0xa6, 0xb1, 0xff, 0xd0, 0x82, - 0xd9, 0x2b, 0x8d, 0x76, 0x18, 0x91, 0x60, 0x2b, 0xcc, 0xd9, 0x74, 0x9f, 0x87, 0x12, 0x91, 0x8a, - 0x76, 0xf9, 0x94, 0x4c, 0x0a, 0xa2, 0x4a, 0x03, 0xcf, 0xe3, 0x42, 0x28, 0xba, 0x1e, 0x5e, 0x5f, - 0x1e, 0xed, 0xf9, 0xdc, 0x2a, 0x20, 0xa2, 0xd7, 0xa5, 0x07, 0xca, 0x60, 0x2f, 0xee, 0x57, 0x52, - 0x58, 0x9c, 0x51, 0xc2, 0xfe, 0x31, 0x0b, 0x4e, 0xab, 0x0f, 0x7e, 0xc7, 0x7d, 0xa6, 0xfd, 0xd5, - 0x02, 0x8c, 0x5e, 0xdd, 0xd8, 0xa8, 0x5c, 0x21, 0x91, 0x36, 0x2b, 0x3b, 0x9b, 0xcf, 0xb1, 0x66, - 0x05, 0xec, 0x74, 0x47, 0x6c, 0x47, 0x6e, 0x63, 0x8e, 0xc7, 0x5b, 0x9a, 0x2b, 0x7b, 0xd1, 0x8d, - 0xa0, 0x1a, 0x05, 0xae, 0xb7, 0x9d, 0x39, 0xd3, 0xa5, 0xcc, 0x52, 0xcc, 0x93, 0x59, 0xd0, 0xf3, - 0x30, 0xc0, 0x02, 0x3e, 0xc9, 0x41, 0x78, 0x58, 0x5d, 0xb1, 0x18, 0xf4, 0xf0, 0x60, 0xb6, 0x74, - 0x13, 0x97, 0xf9, 0x1f, 0x2c, 0x48, 0xd1, 0x4d, 0x18, 0xde, 0x89, 0xa2, 0xd6, 0x55, 0xe2, 0xd4, - 0x49, 0x20, 0x77, 0xd9, 0xf3, 0x59, 0xbb, 0x2c, 0xed, 0x04, 0x4e, 0x16, 0x6f, 0x4c, 0x31, 0x2c, - 0xc4, 0x3a, 0x1f, 0xbb, 0x0a, 0x10, 0xe3, 0x8e, 0xc9, 0x00, 0x62, 0x6f, 0x40, 0x89, 0x7e, 0xee, - 0x42, 0xc3, 0x75, 0x3a, 0x9b, 0x98, 0x9f, 0x86, 0x92, 0x34, 0x20, 0x87, 0xe2, 0x75, 0x38, 0x3b, - 0x91, 0xa4, 0x7d, 0x39, 0xc4, 0x31, 0xde, 0xde, 0x82, 0x53, 0xcc, 0x1d, 0xd0, 0x89, 0x76, 0x8c, - 0xd9, 0xd7, 0x7d, 0x98, 0x9f, 0x11, 0x37, 0x36, 0xde, 0xe6, 0x69, 0xed, 0x39, 0xe3, 0x88, 0xe4, - 0x18, 0xdf, 0xde, 0xec, 0x6f, 0xf4, 0xc1, 0xc3, 0xe5, 0x6a, 0x7e, 0xc0, 0x92, 0x97, 
0x60, 0x84, - 0x0b, 0x82, 0x74, 0xd0, 0x9d, 0x86, 0xa8, 0x57, 0xe9, 0x36, 0x37, 0x34, 0x1c, 0x36, 0x28, 0xd1, - 0x39, 0x28, 0xba, 0x6f, 0x7a, 0xc9, 0xc7, 0x3e, 0xe5, 0xd7, 0xd7, 0x31, 0x85, 0x53, 0x34, 0x95, - 0x29, 0xf9, 0x66, 0xad, 0xd0, 0x4a, 0xae, 0x7c, 0x15, 0xc6, 0xdc, 0xb0, 0x16, 0xba, 0x65, 0x8f, - 0xae, 0x40, 0x6d, 0x0d, 0x2b, 0x6d, 0x02, 0x6d, 0xb4, 0xc2, 0xe2, 0x04, 0xb5, 0x76, 0x72, 0xf4, - 0xf7, 0x2c, 0x97, 0x76, 0x7d, 0x2e, 0x4d, 0x37, 0xf6, 0x16, 0xfb, 0xba, 0x90, 0x29, 0xa9, 0xc5, - 0xc6, 0xce, 0x3f, 0x38, 0xc4, 0x12, 0x47, 0xaf, 0x6a, 0xb5, 0x1d, 0xa7, 0xb5, 0xd0, 0x8e, 0x76, - 0x96, 0xdd, 0xb0, 0xe6, 0xef, 0x91, 0x60, 0x9f, 0xdd, 0xb2, 0x87, 0xe2, 0xab, 0x9a, 0x42, 0x2c, - 0x5d, 0x5d, 0xa8, 0x50, 0x4a, 0x9c, 0x2e, 0x83, 0x16, 0x60, 0x5c, 0x02, 0xab, 0x24, 0x64, 0x9b, - 0xfb, 0x30, 0x63, 0xa3, 0x9e, 0xdf, 0x08, 0xb0, 0x62, 0x92, 0xa4, 0x37, 0x45, 0x57, 0x38, 0x0e, - 0xd1, 0xf5, 0x03, 0x30, 0xea, 0x7a, 0x6e, 0xe4, 0x3a, 0x91, 0xcf, 0x2d, 0x2c, 0xfc, 0x42, 0xcd, - 0x54, 0xc7, 0x65, 0x1d, 0x81, 0x4d, 0x3a, 0xfb, 0x3f, 0xf5, 0xc1, 0x24, 0x1b, 0xb6, 0x77, 0x67, - 0xd8, 0xb7, 0xd3, 0x0c, 0xbb, 0x99, 0x9e, 0x61, 0xc7, 0x21, 0x93, 0xdf, 0xf7, 0x34, 0xfb, 0x34, - 0x94, 0xd4, 0x8b, 0x23, 0xf9, 0xe4, 0xd0, 0xca, 0x79, 0x72, 0xd8, 0xfd, 0x5c, 0x96, 0x4e, 0x5b, - 0xc5, 0x4c, 0xa7, 0xad, 0x2f, 0x5b, 0x10, 0x9b, 0x0c, 0xd0, 0xeb, 0x50, 0x6a, 0xf9, 0xcc, 0x17, - 0x31, 0x90, 0x0e, 0xbe, 0x8f, 0x77, 0xb4, 0x39, 0xf0, 0x98, 0x4d, 0x01, 0xef, 0x85, 0x8a, 0x2c, - 0x8a, 0x63, 0x2e, 0xe8, 0x1a, 0x0c, 0xb6, 0x02, 0x52, 0x8d, 0x58, 0x40, 0x91, 0xde, 0x19, 0xf2, - 0x59, 0xc3, 0x0b, 0x62, 0xc9, 0xc1, 0xfe, 0xcf, 0x16, 0x4c, 0x24, 0x49, 0xd1, 0x07, 0xa1, 0x8f, - 0xdc, 0x25, 0x35, 0xd1, 0xde, 0xcc, 0x43, 0x36, 0x56, 0x3a, 0xf0, 0x0e, 0xa0, 0xff, 0x31, 0x2b, - 0x85, 0xae, 0xc2, 0x20, 0x3d, 0x61, 0xaf, 0xa8, 0xe0, 0x59, 0x8f, 0xe6, 0x9d, 0xd2, 0x4a, 0x54, - 0xe1, 0x8d, 0x13, 0x20, 0x2c, 0x8b, 0x33, 0x4f, 0xa9, 0x5a, 0xab, 0x4a, 0x2f, 0x2f, 0x51, 0xa7, - 0x3b, 0xf6, 0xc6, 0x52, 0x85, 0x13, 0x09, 0x6e, 0xdc, 0x53, 0x4a, 0x02, 0x71, 0xcc, 0xc4, 0xfe, - 0x59, 0x0b, 0x80, 0x3b, 0x86, 0x39, 0xde, 0x36, 0x39, 0x01, 0x3d, 0xf9, 0x32, 0xf4, 0x85, 0x2d, - 0x52, 0xeb, 0xe4, 0x26, 0x1b, 0xb7, 0xa7, 0xda, 0x22, 0xb5, 0x78, 0xc6, 0xd1, 0x7f, 0x98, 0x95, - 0xb6, 0xbf, 0x17, 0x60, 0x2c, 0x26, 0x2b, 0x47, 0xa4, 0x89, 0x9e, 0x35, 0xc2, 0x14, 0x9c, 0x4d, - 0x84, 0x29, 0x28, 0x31, 0x6a, 0x4d, 0x25, 0xfb, 0x69, 0x28, 0x36, 0x9d, 0xbb, 0x42, 0xe7, 0xf6, - 0x74, 0xe7, 0x66, 0x50, 0xfe, 0x73, 0x6b, 0xce, 0x5d, 0x7e, 0x2d, 0x7d, 0x5a, 0xae, 0x90, 0x35, - 0xe7, 0xee, 0x21, 0x77, 0x86, 0x65, 0xbb, 0xf4, 0x75, 0x37, 0x8c, 0x3e, 0xf7, 0x1f, 0xe3, 0xff, - 0x6c, 0xdd, 0xd1, 0x4a, 0x58, 0x5d, 0xae, 0x27, 0x7c, 0x9e, 0x7a, 0xaa, 0xcb, 0xf5, 0x92, 0x75, - 0xb9, 0x5e, 0x0f, 0x75, 0xb9, 0x1e, 0x7a, 0x0b, 0x06, 0x85, 0x4b, 0xa2, 0x08, 0x64, 0x34, 0xdf, - 0x43, 0x7d, 0xc2, 0xa3, 0x91, 0xd7, 0x39, 0x2f, 0xaf, 0xdd, 0x02, 0xda, 0xb5, 0x5e, 0x59, 0x21, - 0xfa, 0xcb, 0x16, 0x8c, 0x89, 0xdf, 0x98, 0xbc, 0xd9, 0x26, 0x61, 0x24, 0xc4, 0xd2, 0xf7, 0xf7, - 0xde, 0x06, 0x51, 0x90, 0x37, 0xe5, 0xfd, 0xf2, 0x9c, 0x31, 0x91, 0x5d, 0x5b, 0x94, 0x68, 0x05, - 0xfa, 0x3b, 0x16, 0x9c, 0x6a, 0x3a, 0x77, 0x79, 0x8d, 0x1c, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x69, - 0xff, 0x83, 0xbd, 0x0d, 0x7f, 0xaa, 0x38, 0x6f, 0xa4, 0xb4, 0x3f, 0x9e, 0xca, 0x22, 0xe9, 0xda, - 0xd4, 0xcc, 0x76, 0xcd, 0x6c, 0xc1, 0x90, 0x9c, 0x6f, 0x19, 0xca, 0x8d, 0x65, 0x5d, 0xe6, 0x3e, - 0xb2, 0x47, 0xa8, 0xfe, 0xfc, 0x9f, 0xd6, 0x23, 0xe6, 0xda, 0x03, 0xad, 0xe7, 0xd3, 0x30, 0xa2, - 0xcf, 0xb1, 
0x07, 0x5a, 0xd7, 0x9b, 0x30, 0x95, 0x31, 0x97, 0x1e, 0x68, 0x95, 0x77, 0xe0, 0x6c, - 0xee, 0xfc, 0x78, 0x90, 0x15, 0xdb, 0x5f, 0xb5, 0xf4, 0x7d, 0xf0, 0x04, 0x8c, 0x15, 0x4b, 0xa6, - 0xb1, 0xe2, 0x7c, 0xe7, 0x95, 0x93, 0x63, 0xb1, 0x78, 0x43, 0x6f, 0x34, 0xdd, 0xd5, 0xd1, 0x6b, - 0x30, 0xd0, 0xa0, 0x10, 0xe9, 0xd8, 0x6a, 0x77, 0x5f, 0x91, 0xb1, 0x30, 0xc9, 0xe0, 0x21, 0x16, - 0x1c, 0xec, 0x5f, 0xb0, 0xa0, 0xef, 0x04, 0x7a, 0x02, 0x9b, 0x3d, 0xf1, 0x6c, 0x2e, 0x6b, 0x11, - 0xd3, 0x79, 0x0e, 0x3b, 0x77, 0x56, 0xee, 0x46, 0xc4, 0x0b, 0xd9, 0x89, 0x9c, 0xd9, 0x31, 0x3f, - 0x69, 0xc1, 0xd4, 0x75, 0xdf, 0xa9, 0x2f, 0x3a, 0x0d, 0xc7, 0xab, 0x91, 0xa0, 0xec, 0x6d, 0x1f, - 0xc9, 0x2b, 0xbb, 0xd0, 0xd5, 0x2b, 0x7b, 0x49, 0x3a, 0x35, 0xf5, 0xe5, 0x8f, 0x1f, 0x95, 0xa4, - 0x93, 0x81, 0x5b, 0x0c, 0xf7, 0xdb, 0x1d, 0x40, 0x7a, 0x2b, 0xc5, 0x1b, 0x19, 0x0c, 0x83, 0x2e, - 0x6f, 0xaf, 0x18, 0xc4, 0x27, 0xb3, 0x25, 0xdc, 0xd4, 0xe7, 0x69, 0xaf, 0x3f, 0x38, 0x00, 0x4b, - 0x46, 0xf6, 0x4b, 0x90, 0xf9, 0xd0, 0xbe, 0xbb, 0x5e, 0xc2, 0xfe, 0x28, 0x4c, 0xb2, 0x92, 0x47, - 0xd4, 0x0c, 0xd8, 0x09, 0x6d, 0x6a, 0x46, 0xd0, 0x40, 0xfb, 0xf3, 0x16, 0x8c, 0xaf, 0x27, 0x62, - 0xa9, 0x5d, 0x64, 0xf6, 0xd7, 0x0c, 0x25, 0x7e, 0x95, 0x41, 0xb1, 0xc0, 0x1e, 0xbb, 0x92, 0xeb, - 0x4f, 0x2d, 0x88, 0x63, 0x5f, 0x9c, 0x80, 0xf8, 0xb6, 0x64, 0x88, 0x6f, 0x99, 0x82, 0xac, 0x6a, - 0x4e, 0x9e, 0xf4, 0x86, 0xae, 0xa9, 0xa8, 0x50, 0x1d, 0x64, 0xd8, 0x98, 0x0d, 0x9f, 0x8a, 0x63, - 0x66, 0xe8, 0x28, 0x19, 0x27, 0xca, 0xfe, 0xad, 0x02, 0x20, 0x45, 0xdb, 0x73, 0xd4, 0xaa, 0x74, - 0x89, 0xe3, 0x89, 0x5a, 0xb5, 0x07, 0x88, 0x79, 0x10, 0x04, 0x8e, 0x17, 0x72, 0xb6, 0xae, 0x50, - 0xeb, 0x1d, 0xcd, 0x3d, 0x61, 0x46, 0x54, 0x89, 0xae, 0xa7, 0xb8, 0xe1, 0x8c, 0x1a, 0x34, 0xcf, - 0x90, 0xfe, 0x5e, 0x3d, 0x43, 0x06, 0xba, 0xbc, 0x83, 0xfb, 0x8a, 0x05, 0xa3, 0xaa, 0x9b, 0xde, - 0x21, 0x5e, 0xea, 0xaa, 0x3d, 0x39, 0x1b, 0x68, 0x45, 0x6b, 0x32, 0x3b, 0x58, 0xbe, 0x93, 0xbd, - 0x67, 0x74, 0x1a, 0xee, 0x5b, 0x44, 0x45, 0x39, 0x9c, 0x15, 0xef, 0x13, 0x05, 0xf4, 0xf0, 0x60, - 0x76, 0x54, 0xfd, 0xe3, 0x51, 0x9c, 0xe3, 0x22, 0x74, 0x4b, 0x1e, 0x4f, 0x4c, 0x45, 0xf4, 0x22, - 0xf4, 0xb7, 0x76, 0x9c, 0x90, 0x24, 0x5e, 0xf3, 0xf4, 0x57, 0x28, 0xf0, 0xf0, 0x60, 0x76, 0x4c, - 0x15, 0x60, 0x10, 0xcc, 0xa9, 0x7b, 0x8f, 0x05, 0x96, 0x9e, 0x9c, 0x5d, 0x63, 0x81, 0xfd, 0xb1, - 0x05, 0x7d, 0xeb, 0x7e, 0xfd, 0x24, 0xb6, 0x80, 0x57, 0x8d, 0x2d, 0xe0, 0x91, 0xbc, 0x00, 0xfb, - 0xb9, 0xab, 0x7f, 0x35, 0xb1, 0xfa, 0xcf, 0xe7, 0x72, 0xe8, 0xbc, 0xf0, 0x9b, 0x30, 0xcc, 0xc2, - 0xf6, 0x8b, 0x97, 0x4b, 0xcf, 0x1b, 0x0b, 0x7e, 0x36, 0xb1, 0xe0, 0xc7, 0x35, 0x52, 0x6d, 0xa5, - 0x3f, 0x05, 0x83, 0xe2, 0x29, 0x4c, 0xf2, 0x59, 0xa8, 0xa0, 0xc5, 0x12, 0x6f, 0xff, 0x78, 0x11, - 0x8c, 0x34, 0x01, 0xe8, 0x97, 0x2c, 0x98, 0x0b, 0xb8, 0x8b, 0x6c, 0x7d, 0xb9, 0x1d, 0xb8, 0xde, - 0x76, 0xb5, 0xb6, 0x43, 0xea, 0xed, 0x86, 0xeb, 0x6d, 0x97, 0xb7, 0x3d, 0x5f, 0x81, 0x57, 0xee, - 0x92, 0x5a, 0x9b, 0x99, 0xdd, 0xba, 0xe4, 0x24, 0x50, 0xae, 0xe6, 0xcf, 0xdd, 0x3b, 0x98, 0x9d, - 0xc3, 0x47, 0xe2, 0x8d, 0x8f, 0xd8, 0x16, 0xf4, 0xeb, 0x16, 0xcc, 0xf3, 0xe8, 0xf9, 0xbd, 0xb7, - 0xbf, 0xc3, 0x6d, 0xb9, 0x22, 0x59, 0xc5, 0x4c, 0x36, 0x48, 0xd0, 0x5c, 0xfc, 0x80, 0xe8, 0xd0, - 0xf9, 0xca, 0xd1, 0xea, 0xc2, 0x47, 0x6d, 0x9c, 0xfd, 0x8f, 0x8b, 0x30, 0x2a, 0x62, 0x46, 0x89, - 0x33, 0xe0, 0x45, 0x63, 0x4a, 0x3c, 0x9a, 0x98, 0x12, 0x93, 0x06, 0xf1, 0xf1, 0x6c, 0xff, 0x21, - 0x4c, 0xd2, 0xcd, 0xf9, 0x2a, 0x71, 0x82, 0x68, 0x93, 0x38, 0xdc, 0xe1, 0xab, 0x78, 0xe4, 0xdd, - 0x5f, 0xe9, 0x27, 0xaf, 0x27, 0x99, 
0xe1, 0x34, 0xff, 0x6f, 0xa7, 0x33, 0xc7, 0x83, 0x89, 0x54, - 0xd8, 0xaf, 0x8f, 0x41, 0x49, 0xbd, 0xe3, 0x10, 0x9b, 0x4e, 0xe7, 0xe8, 0x79, 0x49, 0x0e, 0x5c, - 0xfd, 0x15, 0xbf, 0x21, 0x8a, 0xd9, 0xd9, 0x7f, 0xaf, 0x60, 0x54, 0xc8, 0x07, 0x71, 0x1d, 0x86, - 0x9c, 0x30, 0x74, 0xb7, 0x3d, 0x52, 0xef, 0xa4, 0xa1, 0x4c, 0x55, 0xc3, 0xde, 0xd2, 0x2c, 0x88, - 0x92, 0x58, 0xf1, 0x40, 0x57, 0xb9, 0x5b, 0xdd, 0x1e, 0xe9, 0xa4, 0x9e, 0x4c, 0x71, 0x03, 0xe9, - 0x78, 0xb7, 0x47, 0xb0, 0x28, 0x8f, 0x3e, 0xc1, 0xfd, 0x1e, 0xaf, 0x79, 0xfe, 0x1d, 0xef, 0x8a, - 0xef, 0xcb, 0xb8, 0x0c, 0xbd, 0x31, 0x9c, 0x94, 0xde, 0x8e, 0xaa, 0x38, 0x36, 0xb9, 0xf5, 0x16, - 0x47, 0xf3, 0x33, 0xc0, 0xa2, 0x85, 0x9b, 0xcf, 0xa6, 0x43, 0x44, 0x60, 0x5c, 0x04, 0x24, 0x93, - 0x30, 0xd1, 0x77, 0x99, 0x57, 0x39, 0xb3, 0x74, 0xac, 0x48, 0xbf, 0x66, 0xb2, 0xc0, 0x49, 0x9e, - 0xf6, 0x4f, 0x5b, 0xc0, 0x9e, 0x90, 0x9e, 0x80, 0x3c, 0xf2, 0x21, 0x53, 0x1e, 0x99, 0xce, 0xeb, - 0xe4, 0x1c, 0x51, 0xe4, 0x05, 0x3e, 0xb3, 0x2a, 0x81, 0x7f, 0x77, 0x5f, 0x38, 0xab, 0x74, 0xbf, - 0x7f, 0xd8, 0xff, 0xc7, 0xe2, 0x9b, 0x98, 0x7a, 0x65, 0x81, 0x3e, 0x0b, 0x43, 0x35, 0xa7, 0xe5, - 0xd4, 0x78, 0x4e, 0x9b, 0x5c, 0x8d, 0x9e, 0x51, 0x68, 0x6e, 0x49, 0x94, 0xe0, 0x1a, 0x2a, 0x19, - 0xd8, 0x6e, 0x48, 0x82, 0xbb, 0x6a, 0xa5, 0x54, 0x95, 0x33, 0xbb, 0x30, 0x6a, 0x30, 0x7b, 0xa0, - 0xea, 0x8c, 0xcf, 0xf2, 0x23, 0x56, 0x05, 0x62, 0x6c, 0xc2, 0xa4, 0xa7, 0xfd, 0xa7, 0x07, 0x8a, - 0xbc, 0x5c, 0x3e, 0xde, 0xed, 0x10, 0x65, 0xa7, 0x8f, 0xf6, 0x3a, 0x35, 0xc1, 0x06, 0xa7, 0x39, - 0xdb, 0x3f, 0x61, 0xc1, 0x43, 0x3a, 0xa1, 0xf6, 0x00, 0xa6, 0x9b, 0x91, 0x64, 0x19, 0x86, 0xfc, - 0x16, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xa9, 0x71, 0x49, 0x76, 0xfa, 0x0d, 0x01, 0x3f, 0x14, 0x11, - 0xda, 0x25, 0x77, 0x09, 0xc7, 0xaa, 0x24, 0xbd, 0x7d, 0xb2, 0xce, 0x08, 0xc5, 0x53, 0x27, 0xb6, - 0x07, 0x30, 0x4b, 0x7a, 0x88, 0x05, 0xc6, 0xfe, 0x86, 0xc5, 0x27, 0x96, 0xde, 0x74, 0xf4, 0x26, - 0x4c, 0x34, 0x9d, 0xa8, 0xb6, 0xb3, 0x72, 0xb7, 0x15, 0x70, 0x93, 0x93, 0xec, 0xa7, 0xa7, 0xbb, - 0xf5, 0x93, 0xf6, 0x91, 0xb1, 0x2b, 0xe7, 0x5a, 0x82, 0x19, 0x4e, 0xb1, 0x47, 0x9b, 0x30, 0xcc, - 0x60, 0xec, 0x15, 0x5f, 0xd8, 0x49, 0x34, 0xc8, 0xab, 0x4d, 0x39, 0x23, 0xac, 0xc5, 0x7c, 0xb0, - 0xce, 0xd4, 0xfe, 0x72, 0x91, 0xaf, 0x76, 0x26, 0xca, 0x3f, 0x05, 0x83, 0x2d, 0xbf, 0xbe, 0x54, - 0x5e, 0xc6, 0x62, 0x14, 0xd4, 0x31, 0x52, 0xe1, 0x60, 0x2c, 0xf1, 0xe8, 0x12, 0x0c, 0x89, 0x9f, - 0xd2, 0x44, 0xc8, 0xf6, 0x66, 0x41, 0x17, 0x62, 0x85, 0x45, 0xcf, 0x01, 0xb4, 0x02, 0x7f, 0xcf, - 0xad, 0xb3, 0xe8, 0x12, 0x45, 0xd3, 0x8f, 0xa8, 0xa2, 0x30, 0x58, 0xa3, 0x42, 0xaf, 0xc0, 0x68, - 0xdb, 0x0b, 0xb9, 0x38, 0xa2, 0xc5, 0x92, 0x55, 0x1e, 0x2e, 0x37, 0x75, 0x24, 0x36, 0x69, 0xd1, - 0x02, 0x0c, 0x44, 0x0e, 0xf3, 0x8b, 0xe9, 0xcf, 0x77, 0xf7, 0xdd, 0xa0, 0x14, 0x7a, 0xfa, 0x14, - 0x5a, 0x00, 0x8b, 0x82, 0xe8, 0x63, 0xf2, 0x41, 0x2d, 0xdf, 0xd8, 0x85, 0x9f, 0x7d, 0x6f, 0x87, - 0x80, 0xf6, 0x9c, 0x56, 0xf8, 0xef, 0x1b, 0xbc, 0xd0, 0xcb, 0x00, 0xe4, 0x6e, 0x44, 0x02, 0xcf, - 0x69, 0x28, 0x6f, 0x36, 0x25, 0x17, 0x2c, 0xfb, 0xeb, 0x7e, 0x74, 0x33, 0x24, 0x2b, 0x8a, 0x02, - 0x6b, 0xd4, 0xf6, 0xaf, 0x97, 0x00, 0x62, 0xb9, 0x1d, 0xbd, 0x95, 0xda, 0xb8, 0x9e, 0xe9, 0x2c, - 0xe9, 0x1f, 0xdf, 0xae, 0x85, 0xbe, 0xcf, 0x82, 0x61, 0xa7, 0xd1, 0xf0, 0x6b, 0x0e, 0x8f, 0xf6, - 0x5b, 0xe8, 0xbc, 0x71, 0x8a, 0xfa, 0x17, 0xe2, 0x12, 0xbc, 0x09, 0xcf, 0xcb, 0x19, 0xaa, 0x61, - 0xba, 0xb6, 0x42, 0xaf, 0x18, 0xbd, 0x4f, 0x5e, 0x15, 0x8b, 0x46, 0x57, 0xaa, 0xab, 0x62, 0x89, - 0x9d, 0x11, 0xfa, 0x2d, 0xf1, 0xa6, 0x71, 0x4b, 0xec, 0xcb, 
0x7f, 0x31, 0x68, 0x88, 0xaf, 0xdd, - 0x2e, 0x88, 0xa8, 0xa2, 0x47, 0x0f, 0xe8, 0xcf, 0x7f, 0x9e, 0xa7, 0xdd, 0x93, 0xba, 0x44, 0x0e, - 0xf8, 0x34, 0x8c, 0xd7, 0x4d, 0x21, 0x40, 0xcc, 0xc4, 0x27, 0xf3, 0xf8, 0x26, 0x64, 0x86, 0xf8, - 0xd8, 0x4f, 0x20, 0x70, 0x92, 0x31, 0xaa, 0xf0, 0x60, 0x12, 0x65, 0x6f, 0xcb, 0x17, 0x6f, 0x3d, - 0xec, 0xdc, 0xb1, 0xdc, 0x0f, 0x23, 0xd2, 0xa4, 0x94, 0xf1, 0xe9, 0xbe, 0x2e, 0xca, 0x62, 0xc5, - 0x05, 0xbd, 0x06, 0x03, 0xec, 0x7d, 0x56, 0x38, 0x3d, 0x94, 0xaf, 0x71, 0x36, 0xa3, 0xa3, 0xc5, - 0x0b, 0x92, 0xfd, 0x0d, 0xb1, 0xe0, 0x80, 0xae, 0xca, 0xd7, 0x8f, 0x61, 0xd9, 0xbb, 0x19, 0x12, - 0xf6, 0xfa, 0xb1, 0xb4, 0xf8, 0x78, 0xfc, 0xb0, 0x91, 0xc3, 0x33, 0x93, 0xac, 0x19, 0x25, 0xa9, - 0x14, 0x25, 0xfe, 0xcb, 0xdc, 0x6d, 0xd3, 0x90, 0xdf, 0x3c, 0x33, 0xbf, 0x5b, 0xdc, 0x9d, 0xb7, - 0x4c, 0x16, 0x38, 0xc9, 0x93, 0x4a, 0xa4, 0x7c, 0xd5, 0x8b, 0xd7, 0x22, 0xdd, 0xf6, 0x0e, 0x7e, - 0x11, 0x67, 0xa7, 0x11, 0x87, 0x60, 0x51, 0xfe, 0x44, 0xc5, 0x83, 0x19, 0x0f, 0x26, 0x92, 0x4b, - 0xf4, 0x81, 0x8a, 0x23, 0xbf, 0xdf, 0x07, 0x63, 0xe6, 0x94, 0x42, 0xf3, 0x50, 0x12, 0x4c, 0x54, - 0xfe, 0x03, 0xb5, 0x4a, 0xd6, 0x24, 0x02, 0xc7, 0x34, 0x2c, 0xed, 0x05, 0x2b, 0xae, 0xb9, 0x07, - 0xc7, 0x69, 0x2f, 0x14, 0x06, 0x6b, 0x54, 0xf4, 0x62, 0xb5, 0xe9, 0xfb, 0x91, 0x3a, 0x90, 0xd4, - 0xbc, 0x5b, 0x64, 0x50, 0x2c, 0xb0, 0xf4, 0x20, 0xda, 0x25, 0x81, 0x47, 0x1a, 0x66, 0xdc, 0x61, - 0x75, 0x10, 0x5d, 0xd3, 0x91, 0xd8, 0xa4, 0xa5, 0xc7, 0xa9, 0x1f, 0xb2, 0x89, 0x2c, 0xae, 0x6f, - 0xb1, 0xbb, 0x75, 0x95, 0x3f, 0xc0, 0x96, 0x78, 0xf4, 0x51, 0x78, 0x48, 0xc5, 0x56, 0xc2, 0xdc, - 0x9a, 0x21, 0x6b, 0x1c, 0x30, 0xb4, 0x2d, 0x0f, 0x2d, 0x65, 0x93, 0xe1, 0xbc, 0xf2, 0xe8, 0x55, - 0x18, 0x13, 0x22, 0xbe, 0xe4, 0x38, 0x68, 0x7a, 0x18, 0x5d, 0x33, 0xb0, 0x38, 0x41, 0x2d, 0x23, - 0x27, 0x33, 0x29, 0x5b, 0x72, 0x18, 0x4a, 0x47, 0x4e, 0xd6, 0xf1, 0x38, 0x55, 0x02, 0x2d, 0xc0, - 0x38, 0x97, 0xc1, 0x5c, 0x6f, 0x9b, 0x8f, 0x89, 0x78, 0xcc, 0xa5, 0x96, 0xd4, 0x0d, 0x13, 0x8d, - 0x93, 0xf4, 0xe8, 0x25, 0x18, 0x71, 0x82, 0xda, 0x8e, 0x1b, 0x91, 0x5a, 0xd4, 0x0e, 0xf8, 0x2b, - 0x2f, 0xcd, 0x45, 0x6b, 0x41, 0xc3, 0x61, 0x83, 0xd2, 0x7e, 0x0b, 0xa6, 0x32, 0x22, 0x33, 0xd0, - 0x89, 0xe3, 0xb4, 0x5c, 0xf9, 0x4d, 0x09, 0x0f, 0xe7, 0x85, 0x4a, 0x59, 0x7e, 0x8d, 0x46, 0x45, - 0x67, 0x27, 0x8b, 0xe0, 0xa0, 0xa5, 0x6a, 0x54, 0xb3, 0x73, 0x55, 0x22, 0x70, 0x4c, 0x63, 0xff, - 0xb7, 0x02, 0x8c, 0x67, 0xd8, 0x56, 0x58, 0xba, 0xc0, 0xc4, 0x25, 0x25, 0xce, 0x0e, 0x68, 0x06, - 0xe2, 0x2e, 0x1c, 0x21, 0x10, 0x77, 0xb1, 0x5b, 0x20, 0xee, 0xbe, 0xb7, 0x13, 0x88, 0xdb, 0xec, - 0xb1, 0xfe, 0x9e, 0x7a, 0x2c, 0x23, 0x78, 0xf7, 0xc0, 0x11, 0x83, 0x77, 0x1b, 0x9d, 0x3e, 0xd8, - 0x43, 0xa7, 0xff, 0x48, 0x01, 0x26, 0x92, 0xae, 0xa4, 0x27, 0xa0, 0xb7, 0x7d, 0xcd, 0xd0, 0xdb, - 0x5e, 0xea, 0xe5, 0xf1, 0x6d, 0xae, 0x0e, 0x17, 0x27, 0x74, 0xb8, 0xef, 0xed, 0x89, 0x5b, 0x67, - 0x7d, 0xee, 0x5f, 0x2f, 0xc0, 0xe9, 0xcc, 0xd7, 0xbf, 0x27, 0xd0, 0x37, 0x37, 0x8c, 0xbe, 0x79, - 0xb6, 0xe7, 0x87, 0xc9, 0xb9, 0x1d, 0x74, 0x3b, 0xd1, 0x41, 0xf3, 0xbd, 0xb3, 0xec, 0xdc, 0x4b, - 0x5f, 0x2f, 0xc2, 0xf9, 0xcc, 0x72, 0xb1, 0xda, 0x73, 0xd5, 0x50, 0x7b, 0x3e, 0x97, 0x50, 0x7b, - 0xda, 0x9d, 0x4b, 0x1f, 0x8f, 0x1e, 0x54, 0x3c, 0xd0, 0x65, 0x61, 0x06, 0xee, 0x53, 0x07, 0x6a, - 0x3c, 0xd0, 0x55, 0x8c, 0xb0, 0xc9, 0xf7, 0xdb, 0x49, 0xf7, 0xf9, 0xaf, 0x2c, 0x38, 0x9b, 0x39, - 0x36, 0x27, 0xa0, 0xeb, 0x5a, 0x37, 0x75, 0x5d, 0x4f, 0xf5, 0x3c, 0x5b, 0x73, 0x94, 0x5f, 0x3f, - 0xd5, 0x9f, 0xf3, 0x2d, 0xec, 0x26, 0x7f, 0x03, 0x86, 0x9d, 0x5a, 0x8d, 0x84, 0xe1, 
0x9a, 0x5f, - 0x57, 0xb1, 0x86, 0x9f, 0x65, 0xf7, 0xac, 0x18, 0x7c, 0x78, 0x30, 0x3b, 0x93, 0x64, 0x11, 0xa3, - 0xb1, 0xce, 0x01, 0x7d, 0x02, 0x86, 0x42, 0x71, 0x6e, 0x8a, 0xb1, 0x7f, 0xbe, 0xc7, 0xce, 0x71, - 0x36, 0x49, 0xc3, 0x0c, 0x86, 0xa4, 0x34, 0x15, 0x8a, 0xa5, 0x19, 0x38, 0xa5, 0x70, 0xac, 0x81, - 0x53, 0x9e, 0x03, 0xd8, 0x53, 0x97, 0x81, 0xa4, 0xfe, 0x41, 0xbb, 0x26, 0x68, 0x54, 0xe8, 0xc3, - 0x30, 0x11, 0xf2, 0x68, 0x81, 0x4b, 0x0d, 0x27, 0x64, 0xef, 0x68, 0xc4, 0x2c, 0x64, 0x01, 0x97, - 0xaa, 0x09, 0x1c, 0x4e, 0x51, 0xa3, 0x55, 0x59, 0x2b, 0x0b, 0x6d, 0xc8, 0x27, 0xe6, 0xc5, 0xb8, - 0x46, 0x91, 0xac, 0xf8, 0x54, 0xb2, 0xfb, 0x59, 0xc7, 0x6b, 0x25, 0xd1, 0x27, 0x00, 0xe8, 0xf4, - 0x11, 0x7a, 0x88, 0xc1, 0xfc, 0xcd, 0x93, 0xee, 0x2a, 0xf5, 0x4c, 0xe7, 0x66, 0xf6, 0xa6, 0x76, - 0x59, 0x31, 0xc1, 0x1a, 0x43, 0xe4, 0xc0, 0x68, 0xfc, 0x2f, 0xce, 0xe5, 0x79, 0x29, 0xb7, 0x86, - 0x24, 0x73, 0xa6, 0xf2, 0x5e, 0xd6, 0x59, 0x60, 0x93, 0xa3, 0xfd, 0x63, 0x83, 0xf0, 0x70, 0x87, - 0x6d, 0x18, 0x2d, 0x98, 0xa6, 0xde, 0xa7, 0x93, 0xf7, 0xf7, 0x99, 0xcc, 0xc2, 0xc6, 0x85, 0x3e, - 0x31, 0xdb, 0x0b, 0x6f, 0x7b, 0xb6, 0xff, 0x90, 0xa5, 0x69, 0x56, 0xb8, 0x53, 0xe9, 0x87, 0x8e, - 0x78, 0xbc, 0x1c, 0xa3, 0xaa, 0x65, 0x2b, 0x43, 0x5f, 0xf1, 0x5c, 0xcf, 0xcd, 0xe9, 0x5d, 0x81, - 0xf1, 0x55, 0x0b, 0x90, 0xd0, 0xac, 0x90, 0xba, 0x5a, 0x4b, 0x42, 0x95, 0x71, 0xe5, 0xa8, 0xdf, - 0xbf, 0x90, 0xe2, 0xc4, 0x7b, 0xe2, 0x65, 0x79, 0x0e, 0xa4, 0x09, 0xba, 0xf6, 0x49, 0x46, 0xf3, - 0xd0, 0x47, 0x59, 0x20, 0x5d, 0xf7, 0x2d, 0x21, 0xfc, 0x88, 0xb5, 0xf6, 0xa2, 0x08, 0xa2, 0xab, - 0xe0, 0x54, 0xca, 0xcd, 0x6c, 0xae, 0x4e, 0x84, 0x0d, 0x56, 0x27, 0x7b, 0xf5, 0x6e, 0xc3, 0x43, - 0x39, 0x5d, 0xf6, 0x40, 0x6f, 0xe0, 0xbf, 0x69, 0xc1, 0xb9, 0x8e, 0x11, 0x61, 0xbe, 0x05, 0x65, - 0x43, 0xfb, 0x73, 0x16, 0x64, 0x0f, 0xb6, 0xe1, 0x51, 0x36, 0x0f, 0xa5, 0x5a, 0x22, 0xeb, 0x60, - 0x1c, 0x1b, 0x41, 0x65, 0x1c, 0x8c, 0x69, 0x0c, 0xc7, 0xb1, 0x42, 0x57, 0xc7, 0xb1, 0x5f, 0xb1, - 0x20, 0xb5, 0xbf, 0x9f, 0x80, 0xa0, 0x51, 0x36, 0x05, 0x8d, 0xc7, 0x7b, 0xe9, 0xcd, 0x1c, 0x19, - 0xe3, 0x8f, 0xc6, 0xe1, 0x4c, 0xce, 0x8b, 0xbc, 0x3d, 0x98, 0xdc, 0xae, 0x11, 0xf3, 0x71, 0x75, - 0xa7, 0xa0, 0x43, 0x1d, 0x5f, 0x62, 0xf3, 0x64, 0x8f, 0x29, 0x12, 0x9c, 0xae, 0x02, 0x7d, 0xce, - 0x82, 0x53, 0xce, 0x9d, 0x70, 0x85, 0x0a, 0x8c, 0x6e, 0x6d, 0xb1, 0xe1, 0xd7, 0x76, 0xe9, 0x69, - 0x2c, 0x17, 0xc2, 0x0b, 0x99, 0x4a, 0xbc, 0xdb, 0xd5, 0x14, 0xbd, 0x51, 0x3d, 0x4b, 0xed, 0x9b, - 0x45, 0x85, 0x33, 0xeb, 0x42, 0x58, 0x64, 0x4f, 0xa0, 0xd7, 0xd1, 0x0e, 0xcf, 0xff, 0xb3, 0x9e, - 0x4e, 0x72, 0x09, 0x48, 0x62, 0xb0, 0xe2, 0x83, 0x3e, 0x05, 0xa5, 0x6d, 0xf9, 0xd2, 0x37, 0x43, - 0xc2, 0x8a, 0x3b, 0xb2, 0xf3, 0xfb, 0x67, 0x6e, 0x89, 0x57, 0x44, 0x38, 0x66, 0x8a, 0x5e, 0x85, - 0xa2, 0xb7, 0x15, 0x76, 0xca, 0x8e, 0x9b, 0x70, 0xb9, 0xe4, 0x41, 0x36, 0xd6, 0x57, 0xab, 0x98, - 0x16, 0x44, 0x57, 0xa1, 0x18, 0x6c, 0xd6, 0x85, 0x06, 0x3a, 0x73, 0x91, 0xe2, 0xc5, 0xe5, 0x9c, - 0x56, 0x31, 0x4e, 0x78, 0x71, 0x19, 0x53, 0x16, 0xa8, 0x02, 0xfd, 0xec, 0x19, 0x9b, 0x90, 0x67, - 0x32, 0x6f, 0x6e, 0x1d, 0x9e, 0x83, 0xf2, 0x48, 0x1c, 0x8c, 0x00, 0x73, 0x46, 0x68, 0x03, 0x06, - 0x6a, 0x2c, 0x93, 0xaa, 0x10, 0x60, 0xde, 0x97, 0xa9, 0x6b, 0xee, 0x90, 0x62, 0x56, 0xa8, 0x5e, - 0x19, 0x05, 0x16, 0xbc, 0x18, 0x57, 0xd2, 0xda, 0xd9, 0x0a, 0x45, 0xa6, 0xf1, 0x6c, 0xae, 0x1d, - 0x32, 0x27, 0x0b, 0xae, 0x8c, 0x02, 0x0b, 0x5e, 0xe8, 0x65, 0x28, 0x6c, 0xd5, 0xc4, 0x13, 0xb5, - 0x4c, 0xa5, 0xb3, 0x19, 0x27, 0x65, 0x71, 0xe0, 0xde, 0xc1, 0x6c, 0x61, 0x75, 0x09, 0x17, 0xb6, - 0x6a, 0x68, 
0x1d, 0x06, 0xb7, 0x78, 0x64, 0x05, 0xa1, 0x57, 0x7e, 0x32, 0x3b, 0xe8, 0x43, 0x2a, - 0xf8, 0x02, 0x7f, 0xee, 0x24, 0x10, 0x58, 0x32, 0x61, 0xc9, 0x08, 0x54, 0x84, 0x08, 0x11, 0xa0, - 0x6e, 0xee, 0x68, 0x51, 0x3d, 0xb8, 0x7c, 0x19, 0xc7, 0x99, 0xc0, 0x1a, 0x47, 0x3a, 0xab, 0x9d, - 0xb7, 0xda, 0x01, 0x8b, 0x02, 0x2e, 0x22, 0x19, 0x65, 0xce, 0xea, 0x05, 0x49, 0xd4, 0x69, 0x56, - 0x2b, 0x22, 0x1c, 0x33, 0x45, 0xbb, 0x30, 0xba, 0x17, 0xb6, 0x76, 0x88, 0x5c, 0xd2, 0x2c, 0xb0, - 0x51, 0x8e, 0x7c, 0x74, 0x4b, 0x10, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x5d, 0x88, 0xc9, 0xb2, - 0xb7, 0x74, 0x66, 0xd8, 0xe4, 0x4d, 0xbb, 0xff, 0xcd, 0xb6, 0xbf, 0xb9, 0x1f, 0x11, 0x11, 0x57, - 0x2e, 0xb3, 0xfb, 0x5f, 0xe7, 0x24, 0xe9, 0xee, 0x17, 0x08, 0x2c, 0x99, 0xa0, 0x5b, 0xa2, 0x7b, - 0xd8, 0xee, 0x39, 0x91, 0x1f, 0xfc, 0x75, 0x41, 0x12, 0xe5, 0x74, 0x0a, 0xdb, 0x2d, 0x63, 0x56, - 0x6c, 0x97, 0x6c, 0xed, 0xf8, 0x91, 0xef, 0x25, 0x76, 0xe8, 0xc9, 0xfc, 0x5d, 0xb2, 0x92, 0x41, - 0x9f, 0xde, 0x25, 0xb3, 0xa8, 0x70, 0x66, 0x5d, 0xa8, 0x0e, 0x63, 0x2d, 0x3f, 0x88, 0xee, 0xf8, - 0x81, 0x9c, 0x5f, 0xa8, 0x83, 0x5e, 0xcc, 0xa0, 0x14, 0x35, 0xb2, 0x90, 0x8d, 0x26, 0x06, 0x27, - 0x78, 0xa2, 0x8f, 0xc0, 0x60, 0x58, 0x73, 0x1a, 0xa4, 0x7c, 0x63, 0x7a, 0x2a, 0xff, 0xf8, 0xa9, - 0x72, 0x92, 0x9c, 0xd9, 0xc5, 0x03, 0x63, 0x70, 0x12, 0x2c, 0xd9, 0xa1, 0x55, 0xe8, 0x67, 0xc9, - 0xe6, 0x58, 0x10, 0xc4, 0x9c, 0x18, 0xb6, 0x29, 0x07, 0x78, 0xbe, 0x37, 0x31, 0x30, 0xe6, 0xc5, - 0xe9, 0x1a, 0x10, 0xd7, 0x43, 0x3f, 0x9c, 0x3e, 0x9d, 0xbf, 0x06, 0xc4, 0xad, 0xf2, 0x46, 0xb5, - 0xd3, 0x1a, 0x50, 0x44, 0x38, 0x66, 0x4a, 0x77, 0x66, 0xba, 0x9b, 0x9e, 0xe9, 0xe0, 0xb9, 0x95, - 0xbb, 0x97, 0xb2, 0x9d, 0x99, 0xee, 0xa4, 0x94, 0x85, 0xfd, 0xbb, 0x83, 0x69, 0x99, 0x85, 0x29, - 0x14, 0xbe, 0xc7, 0x4a, 0xd9, 0x9a, 0xdf, 0xdf, 0xab, 0x7e, 0xf3, 0x18, 0xaf, 0x42, 0x9f, 0xb3, - 0xe0, 0x4c, 0x2b, 0xf3, 0x43, 0x84, 0x00, 0xd0, 0x9b, 0x9a, 0x94, 0x7f, 0xba, 0x0a, 0x98, 0x99, - 0x8d, 0xc7, 0x39, 0x35, 0x25, 0xaf, 0x9b, 0xc5, 0xb7, 0x7d, 0xdd, 0x5c, 0x83, 0xa1, 0x1a, 0xbf, - 0x8a, 0x74, 0xcc, 0x2c, 0x9e, 0xbc, 0x7b, 0x33, 0x51, 0x42, 0xdc, 0x61, 0xb6, 0xb0, 0x62, 0x81, - 0x7e, 0xd8, 0x82, 0x73, 0xc9, 0xa6, 0x63, 0xc2, 0xd0, 0x22, 0xca, 0x26, 0xd7, 0x65, 0xac, 0x8a, - 0xef, 0x4f, 0xc9, 0xff, 0x06, 0xf1, 0x61, 0x37, 0x02, 0xdc, 0xb9, 0x32, 0xb4, 0x9c, 0xa1, 0x4c, - 0x19, 0x30, 0x0d, 0x48, 0x3d, 0x28, 0x54, 0x5e, 0x80, 0x91, 0xa6, 0xdf, 0xf6, 0x22, 0xe1, 0xe8, - 0x25, 0x9c, 0x4e, 0x98, 0xb3, 0xc5, 0x9a, 0x06, 0xc7, 0x06, 0x55, 0x42, 0x0d, 0x33, 0x74, 0xdf, - 0x6a, 0x98, 0x37, 0x60, 0xc4, 0xd3, 0x3c, 0x93, 0x85, 0x3c, 0x70, 0x31, 0x3f, 0x42, 0xae, 0xee, - 0xc7, 0xcc, 0x5b, 0xa9, 0x43, 0xb0, 0xc1, 0xed, 0x64, 0x3d, 0xc0, 0xbe, 0x64, 0x65, 0x08, 0xf5, - 0x5c, 0x15, 0xf3, 0x41, 0x53, 0x15, 0x73, 0x31, 0xa9, 0x8a, 0x49, 0x19, 0x0f, 0x0c, 0x2d, 0x4c, - 0xef, 0x09, 0x80, 0x7a, 0x8d, 0xb2, 0x69, 0x37, 0xe0, 0x42, 0xb7, 0x63, 0x89, 0x79, 0xfc, 0xd5, - 0x95, 0xa9, 0x38, 0xf6, 0xf8, 0xab, 0x97, 0x97, 0x31, 0xc3, 0xf4, 0x1a, 0xbf, 0xc9, 0xfe, 0x2f, - 0x16, 0x14, 0x2b, 0x7e, 0xfd, 0x04, 0x2e, 0xbc, 0x1f, 0x32, 0x2e, 0xbc, 0x0f, 0x67, 0x1f, 0x88, - 0xf5, 0x5c, 0xd3, 0xc7, 0x4a, 0xc2, 0xf4, 0x71, 0x2e, 0x8f, 0x41, 0x67, 0x43, 0xc7, 0x4f, 0x16, - 0x61, 0xb8, 0xe2, 0xd7, 0x95, 0xbb, 0xfd, 0x3f, 0xbd, 0x1f, 0x77, 0xfb, 0xdc, 0x34, 0x16, 0x1a, - 0x67, 0xe6, 0x28, 0x28, 0x5f, 0x1a, 0x7f, 0x8b, 0x79, 0xdd, 0xdf, 0x26, 0xee, 0xf6, 0x4e, 0x44, - 0xea, 0xc9, 0xcf, 0x39, 0x39, 0xaf, 0xfb, 0xdf, 0x2d, 0xc0, 0x78, 0xa2, 0x76, 0xd4, 0x80, 0xd1, - 0x86, 0xae, 0x58, 0x17, 0xf3, 0xf4, 
0xbe, 0x74, 0xf2, 0xc2, 0x6b, 0x59, 0x03, 0x61, 0x93, 0x39, - 0x9a, 0x03, 0x50, 0x96, 0x66, 0xa9, 0x5e, 0x65, 0x52, 0xbf, 0x32, 0x45, 0x87, 0x58, 0xa3, 0x40, - 0x2f, 0xc2, 0x70, 0xe4, 0xb7, 0xfc, 0x86, 0xbf, 0xbd, 0x7f, 0x8d, 0xc8, 0xd0, 0x5e, 0xca, 0x17, - 0x71, 0x23, 0x46, 0x61, 0x9d, 0x0e, 0xdd, 0x85, 0x49, 0xc5, 0xa4, 0x7a, 0x0c, 0xc6, 0x06, 0xa6, - 0x55, 0x58, 0x4f, 0x72, 0xc4, 0xe9, 0x4a, 0xec, 0x9f, 0x29, 0xf2, 0x2e, 0xf6, 0x22, 0xf7, 0xdd, - 0xd5, 0xf0, 0xce, 0x5e, 0x0d, 0x5f, 0xb7, 0x60, 0x82, 0xd6, 0xce, 0x1c, 0xad, 0xe4, 0x31, 0xaf, - 0x62, 0x72, 0x5b, 0x1d, 0x62, 0x72, 0x5f, 0xa4, 0xbb, 0x66, 0xdd, 0x6f, 0x47, 0x42, 0x77, 0xa7, - 0x6d, 0x8b, 0x14, 0x8a, 0x05, 0x56, 0xd0, 0x91, 0x20, 0x10, 0x8f, 0x43, 0x75, 0x3a, 0x12, 0x04, - 0x58, 0x60, 0x65, 0xc8, 0xee, 0xbe, 0xec, 0x90, 0xdd, 0x3c, 0xf2, 0xaa, 0x70, 0xc9, 0x11, 0x02, - 0x97, 0x16, 0x79, 0x55, 0xfa, 0xea, 0xc4, 0x34, 0xf6, 0x57, 0x8b, 0x30, 0x52, 0xf1, 0xeb, 0xb1, - 0x95, 0xf9, 0x05, 0xc3, 0xca, 0x7c, 0x21, 0x61, 0x65, 0x9e, 0xd0, 0x69, 0xdf, 0xb5, 0x29, 0x7f, - 0xb3, 0x6c, 0xca, 0xbf, 0x6c, 0xb1, 0x51, 0x5b, 0x5e, 0xaf, 0x72, 0xbf, 0x3d, 0x74, 0x19, 0x86, - 0xd9, 0x06, 0xc3, 0x5e, 0x23, 0x4b, 0xd3, 0x2b, 0x4b, 0x45, 0xb5, 0x1e, 0x83, 0xb1, 0x4e, 0x83, - 0x2e, 0xc1, 0x50, 0x48, 0x9c, 0xa0, 0xb6, 0xa3, 0x76, 0x57, 0x61, 0x27, 0xe5, 0x30, 0xac, 0xb0, - 0xe8, 0xf5, 0x38, 0xe8, 0x67, 0x31, 0xff, 0x75, 0xa3, 0xde, 0x1e, 0xbe, 0x44, 0xf2, 0x23, 0x7d, - 0xda, 0xb7, 0x01, 0xa5, 0xe9, 0x7b, 0x08, 0x4b, 0x37, 0x6b, 0x86, 0xa5, 0x2b, 0xa5, 0x42, 0xd2, - 0xfd, 0x89, 0x05, 0x63, 0x15, 0xbf, 0x4e, 0x97, 0xee, 0xb7, 0xd3, 0x3a, 0xd5, 0x23, 0x1e, 0x0f, - 0x74, 0x88, 0x78, 0xfc, 0x18, 0xf4, 0x57, 0xfc, 0x7a, 0xb9, 0xd2, 0x29, 0xb4, 0x80, 0xfd, 0x37, - 0x2c, 0x18, 0xac, 0xf8, 0xf5, 0x13, 0x30, 0x0b, 0x7c, 0xd0, 0x34, 0x0b, 0x3c, 0x94, 0x33, 0x6f, - 0x72, 0x2c, 0x01, 0x7f, 0xad, 0x0f, 0x46, 0x69, 0x3b, 0xfd, 0x6d, 0x39, 0x94, 0x46, 0xb7, 0x59, - 0x3d, 0x74, 0x1b, 0x95, 0xc2, 0xfd, 0x46, 0xc3, 0xbf, 0x93, 0x1c, 0xd6, 0x55, 0x06, 0xc5, 0x02, - 0x8b, 0x9e, 0x81, 0xa1, 0x56, 0x40, 0xf6, 0x5c, 0x5f, 0x88, 0xb7, 0x9a, 0x91, 0xa5, 0x22, 0xe0, - 0x58, 0x51, 0xd0, 0x6b, 0x61, 0xe8, 0x7a, 0xf4, 0x28, 0xaf, 0xf9, 0x5e, 0x9d, 0x6b, 0xce, 0x8b, - 0x22, 0x2d, 0x87, 0x06, 0xc7, 0x06, 0x15, 0xba, 0x0d, 0x25, 0xf6, 0x9f, 0x6d, 0x3b, 0x47, 0x4f, - 0xf0, 0x2a, 0x12, 0xfe, 0x09, 0x06, 0x38, 0xe6, 0x85, 0x9e, 0x03, 0x88, 0x64, 0x68, 0xfb, 0x50, - 0x04, 0x5a, 0x53, 0x57, 0x01, 0x15, 0xf4, 0x3e, 0xc4, 0x1a, 0x15, 0x7a, 0x1a, 0x4a, 0x91, 0xe3, - 0x36, 0xae, 0xbb, 0x1e, 0x09, 0x99, 0x46, 0xbc, 0x28, 0xf3, 0xee, 0x09, 0x20, 0x8e, 0xf1, 0x54, - 0x14, 0x63, 0x41, 0x38, 0x78, 0x7a, 0xe8, 0x21, 0x46, 0xcd, 0x44, 0xb1, 0xeb, 0x0a, 0x8a, 0x35, - 0x0a, 0xb4, 0x03, 0x8f, 0xb8, 0x1e, 0x4b, 0x61, 0x41, 0xaa, 0xbb, 0x6e, 0x6b, 0xe3, 0x7a, 0xf5, - 0x16, 0x09, 0xdc, 0xad, 0xfd, 0x45, 0xa7, 0xb6, 0x4b, 0x3c, 0x99, 0xba, 0xf3, 0x71, 0xd1, 0xc4, - 0x47, 0xca, 0x1d, 0x68, 0x71, 0x47, 0x4e, 0xf6, 0xf3, 0x6c, 0xbe, 0xdf, 0xa8, 0xa2, 0xf7, 0x1a, - 0x5b, 0xc7, 0x19, 0x7d, 0xeb, 0x38, 0x3c, 0x98, 0x1d, 0xb8, 0x51, 0xd5, 0x62, 0x48, 0xbc, 0x04, - 0xa7, 0x2b, 0x7e, 0xbd, 0xe2, 0x07, 0xd1, 0xaa, 0x1f, 0xdc, 0x71, 0x82, 0xba, 0x9c, 0x5e, 0xb3, - 0x32, 0x8a, 0x06, 0xdd, 0x3f, 0xfb, 0xf9, 0xee, 0x62, 0x44, 0xc8, 0x78, 0x9e, 0x49, 0x6c, 0x47, - 0x7c, 0xfb, 0x55, 0x63, 0xb2, 0x83, 0x4a, 0x02, 0x73, 0xc5, 0x89, 0x08, 0xba, 0xc1, 0x92, 0x5b, - 0xc7, 0xc7, 0xa8, 0x28, 0xfe, 0x94, 0x96, 0xdc, 0x3a, 0x46, 0x66, 0x9e, 0xbb, 0x66, 0x79, 0xfb, - 0xb3, 0xa2, 0x12, 0x7e, 0x07, 0xe7, 0xfe, 0x75, 0xbd, 0x64, 
0xb7, 0x95, 0x59, 0x22, 0x0a, 0xf9, - 0xe9, 0x05, 0xb8, 0xd5, 0xb3, 0x63, 0x96, 0x08, 0xfb, 0x45, 0x98, 0xa4, 0x57, 0x3f, 0x25, 0x47, - 0xb1, 0x8f, 0xec, 0x1e, 0xcd, 0xe3, 0xbf, 0xf6, 0xb3, 0x73, 0x20, 0x91, 0xfe, 0x04, 0x7d, 0x12, - 0xc6, 0x42, 0x72, 0xdd, 0xf5, 0xda, 0x77, 0xa5, 0xe2, 0xa5, 0xc3, 0x9b, 0xc3, 0xea, 0x8a, 0x4e, - 0xc9, 0xd5, 0xb7, 0x26, 0x0c, 0x27, 0xb8, 0xa1, 0x26, 0x8c, 0xdd, 0x71, 0xbd, 0xba, 0x7f, 0x27, - 0x94, 0xfc, 0x87, 0xf2, 0xb5, 0xb8, 0xb7, 0x39, 0x65, 0xa2, 0x8d, 0x46, 0x75, 0xb7, 0x0d, 0x66, - 0x38, 0xc1, 0x9c, 0xae, 0xb5, 0xa0, 0xed, 0x2d, 0x84, 0x37, 0x43, 0x12, 0x88, 0xe4, 0xea, 0x6c, - 0xad, 0x61, 0x09, 0xc4, 0x31, 0x9e, 0xae, 0x35, 0xf6, 0xe7, 0x4a, 0xe0, 0xb7, 0x79, 0xae, 0x0d, - 0xb1, 0xd6, 0xb0, 0x82, 0x62, 0x8d, 0x82, 0xee, 0x45, 0xec, 0xdf, 0xba, 0xef, 0x61, 0xdf, 0x8f, - 0xe4, 0xee, 0xc5, 0x3c, 0x11, 0x34, 0x38, 0x36, 0xa8, 0xd0, 0x2a, 0xa0, 0xb0, 0xdd, 0x6a, 0x35, - 0x98, 0x33, 0x93, 0xd3, 0x60, 0xac, 0xb8, 0x97, 0x47, 0x91, 0xc7, 0x0a, 0xae, 0xa6, 0xb0, 0x38, - 0xa3, 0x04, 0x3d, 0x96, 0xb6, 0x44, 0x53, 0xfb, 0x59, 0x53, 0xb9, 0xc5, 0xa7, 0xca, 0xdb, 0x29, - 0x71, 0x68, 0x05, 0x06, 0xc3, 0xfd, 0xb0, 0x16, 0x89, 0xd0, 0x8e, 0x39, 0x19, 0xae, 0xaa, 0x8c, - 0x44, 0x4b, 0xb0, 0xc8, 0x8b, 0x60, 0x59, 0x16, 0xd5, 0x60, 0x4a, 0x70, 0x5c, 0xda, 0x71, 0x3c, - 0x95, 0x2f, 0x88, 0xfb, 0x74, 0x5f, 0xbe, 0x77, 0x30, 0x3b, 0x25, 0x6a, 0xd6, 0xd1, 0x87, 0x07, - 0xb3, 0x67, 0x2a, 0x7e, 0x3d, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, 0xab, 0xf9, 0xcd, 0x56, - 0x25, 0xf0, 0xb7, 0xdc, 0x06, 0xe9, 0x64, 0x35, 0xab, 0x1a, 0x94, 0x62, 0xf2, 0x19, 0x30, 0x9c, - 0xe0, 0x66, 0x7f, 0x96, 0x89, 0x6e, 0x2c, 0x9f, 0x78, 0xd4, 0x0e, 0x08, 0x6a, 0xc2, 0x68, 0x8b, - 0x2d, 0x6e, 0x91, 0x01, 0x43, 0xcc, 0xf5, 0x17, 0x7a, 0xd4, 0xfe, 0xdc, 0xa1, 0x27, 0x9e, 0xe9, - 0x19, 0x55, 0xd1, 0xd9, 0x61, 0x93, 0xbb, 0xfd, 0x6f, 0xce, 0xb2, 0xc3, 0xbf, 0xca, 0x55, 0x3a, - 0x83, 0xe2, 0x09, 0x89, 0xb8, 0x45, 0xce, 0xe4, 0xeb, 0x16, 0xe3, 0x61, 0x11, 0xcf, 0x50, 0xb0, - 0x2c, 0x8b, 0x3e, 0x01, 0x63, 0xf4, 0x52, 0xa6, 0x0e, 0xe0, 0x70, 0xfa, 0x54, 0x7e, 0xa8, 0x0f, - 0x45, 0xa5, 0x67, 0xc7, 0xd1, 0x0b, 0xe3, 0x04, 0x33, 0xf4, 0x3a, 0xf3, 0x44, 0x92, 0xac, 0x0b, - 0xbd, 0xb0, 0xd6, 0x9d, 0x8e, 0x24, 0x5b, 0x8d, 0x09, 0x6a, 0xc3, 0x54, 0x3a, 0x97, 0x5e, 0x38, - 0x6d, 0xe7, 0x4b, 0xb7, 0xe9, 0x74, 0x78, 0x71, 0x1a, 0x93, 0x34, 0x2e, 0xc4, 0x59, 0xfc, 0xd1, - 0x75, 0x18, 0x15, 0x49, 0xb5, 0xc5, 0xcc, 0x2d, 0x1a, 0x2a, 0xcf, 0x51, 0xac, 0x23, 0x0f, 0x93, - 0x00, 0x6c, 0x16, 0x46, 0xdb, 0x70, 0x4e, 0x4b, 0x72, 0x75, 0x25, 0x70, 0x98, 0xdf, 0x82, 0xcb, - 0xb6, 0x53, 0x4d, 0x2c, 0x79, 0xf4, 0xde, 0xc1, 0xec, 0xb9, 0x8d, 0x4e, 0x84, 0xb8, 0x33, 0x1f, - 0x74, 0x03, 0x4e, 0xf3, 0x87, 0xea, 0xcb, 0xc4, 0xa9, 0x37, 0x5c, 0x4f, 0xc9, 0x3d, 0x7c, 0xc9, - 0x9f, 0xbd, 0x77, 0x30, 0x7b, 0x7a, 0x21, 0x8b, 0x00, 0x67, 0x97, 0x43, 0x1f, 0x84, 0x52, 0xdd, - 0x0b, 0x45, 0x1f, 0x0c, 0x18, 0x79, 0xc4, 0x4a, 0xcb, 0xeb, 0x55, 0xf5, 0xfd, 0xf1, 0x1f, 0x1c, - 0x17, 0x40, 0xdb, 0x5c, 0x2d, 0xae, 0x94, 0x35, 0x83, 0xa9, 0x40, 0x5d, 0x49, 0x7d, 0xa6, 0xf1, - 0x54, 0x95, 0xdb, 0x83, 0xd4, 0x0b, 0x0e, 0xe3, 0x15, 0xab, 0xc1, 0x18, 0xbd, 0x06, 0x48, 0xc4, - 0xab, 0x5f, 0xa8, 0xb1, 0xf4, 0x2a, 0xcc, 0x8a, 0x30, 0x64, 0x3e, 0x9e, 0xac, 0xa6, 0x28, 0x70, - 0x46, 0x29, 0x74, 0x95, 0xee, 0x2a, 0x3a, 0x54, 0xec, 0x5a, 0x2a, 0xeb, 0xe3, 0x32, 0x69, 0x05, - 0x84, 0xf9, 0x61, 0x99, 0x1c, 0x71, 0xa2, 0x1c, 0xaa, 0xc3, 0x23, 0x4e, 0x3b, 0xf2, 0x99, 0xc5, - 0xc1, 0x24, 0xdd, 0xf0, 0x77, 0x89, 0xc7, 0x8c, 0x7d, 0x43, 0x8b, 0x17, 0xa8, 0x60, 
0xb5, 0xd0, - 0x81, 0x0e, 0x77, 0xe4, 0x42, 0x05, 0x62, 0x95, 0xe6, 0x19, 0xcc, 0xf0, 0x63, 0x19, 0xa9, 0x9e, - 0x5f, 0x84, 0xe1, 0x1d, 0x3f, 0x8c, 0xd6, 0x49, 0x74, 0xc7, 0x0f, 0x76, 0x45, 0x18, 0xdd, 0x38, - 0x28, 0x79, 0x8c, 0xc2, 0x3a, 0x1d, 0xbd, 0xf1, 0x32, 0x57, 0x94, 0xf2, 0x32, 0xf3, 0x02, 0x18, - 0x8a, 0xf7, 0x98, 0xab, 0x1c, 0x8c, 0x25, 0x5e, 0x92, 0x96, 0x2b, 0x4b, 0xcc, 0xa2, 0x9f, 0x20, - 0x2d, 0x57, 0x96, 0xb0, 0xc4, 0xd3, 0xe9, 0x1a, 0xee, 0x38, 0x01, 0xa9, 0x04, 0x7e, 0x8d, 0x84, - 0x5a, 0x28, 0xfc, 0x87, 0x79, 0x90, 0x60, 0x3a, 0x5d, 0xab, 0x59, 0x04, 0x38, 0xbb, 0x1c, 0x22, - 0xe9, 0x04, 0x6f, 0x63, 0xf9, 0xa6, 0x98, 0xb4, 0x3c, 0xd3, 0x63, 0x8e, 0x37, 0x0f, 0x26, 0x54, - 0x6a, 0x39, 0x1e, 0x16, 0x38, 0x9c, 0x1e, 0x67, 0x73, 0xbb, 0xf7, 0x98, 0xc2, 0xca, 0xb8, 0x55, - 0x4e, 0x70, 0xc2, 0x29, 0xde, 0x46, 0x84, 0xb9, 0x89, 0xae, 0x11, 0xe6, 0xe6, 0xa1, 0x14, 0xb6, - 0x37, 0xeb, 0x7e, 0xd3, 0x71, 0x3d, 0x66, 0xd1, 0xd7, 0xae, 0x5e, 0x55, 0x89, 0xc0, 0x31, 0x0d, - 0x5a, 0x85, 0x21, 0x47, 0x5a, 0xae, 0x50, 0x7e, 0x4c, 0x21, 0x65, 0xaf, 0xe2, 0x61, 0x36, 0xa4, - 0xad, 0x4a, 0x95, 0x45, 0xaf, 0xc0, 0xa8, 0x78, 0x68, 0x2d, 0xb2, 0x9a, 0x4e, 0x99, 0xaf, 0xe1, - 0xaa, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x26, 0x0c, 0x47, 0x7e, 0x83, 0x3d, 0xe9, 0xa2, 0x62, 0xde, - 0x99, 0xfc, 0xe8, 0x78, 0x1b, 0x8a, 0x4c, 0x57, 0x1a, 0xab, 0xa2, 0x58, 0xe7, 0x83, 0x36, 0xf8, - 0x7c, 0x67, 0x81, 0xef, 0x49, 0x38, 0xfd, 0x50, 0xfe, 0x99, 0xa4, 0xe2, 0xe3, 0x9b, 0xcb, 0x41, - 0x94, 0xc4, 0x3a, 0x1b, 0x74, 0x05, 0x26, 0x5b, 0x81, 0xeb, 0xb3, 0x39, 0xa1, 0x8c, 0x96, 0xd3, - 0x66, 0x9a, 0xab, 0x4a, 0x92, 0x00, 0xa7, 0xcb, 0xb0, 0x77, 0xf2, 0x02, 0x38, 0x7d, 0x96, 0xa7, - 0xea, 0xe0, 0x37, 0x59, 0x0e, 0xc3, 0x0a, 0x8b, 0xd6, 0xd8, 0x4e, 0xcc, 0x95, 0x30, 0xd3, 0x33, - 0xf9, 0x61, 0x8c, 0x74, 0x65, 0x0d, 0x17, 0x5e, 0xd5, 0x5f, 0x1c, 0x73, 0x40, 0x75, 0x2d, 0x43, - 0x26, 0xbd, 0x02, 0x84, 0xd3, 0x8f, 0x74, 0xf0, 0x07, 0x4c, 0x5c, 0x8a, 0x62, 0x81, 0xc0, 0x00, - 0x87, 0x38, 0xc1, 0x13, 0x7d, 0x18, 0x26, 0x44, 0xf0, 0xc5, 0xb8, 0x9b, 0xce, 0xc5, 0x8e, 0xf2, - 0x38, 0x81, 0xc3, 0x29, 0x6a, 0x9e, 0x2a, 0xc3, 0xd9, 0x6c, 0x10, 0xb1, 0xf5, 0x5d, 0x77, 0xbd, - 0xdd, 0x70, 0xfa, 0x3c, 0xdb, 0x1f, 0x44, 0xaa, 0x8c, 0x24, 0x16, 0x67, 0x94, 0x40, 0x1b, 0x30, - 0xd1, 0x0a, 0x08, 0x69, 0x32, 0x41, 0x5f, 0x9c, 0x67, 0xb3, 0x3c, 0x4c, 0x04, 0x6d, 0x49, 0x25, - 0x81, 0x3b, 0xcc, 0x80, 0xe1, 0x14, 0x07, 0x74, 0x07, 0x86, 0xfc, 0x3d, 0x12, 0xec, 0x10, 0xa7, - 0x3e, 0x7d, 0xa1, 0xc3, 0xc3, 0x0d, 0x71, 0xb8, 0xdd, 0x10, 0xb4, 0x09, 0x47, 0x07, 0x09, 0xee, - 0xee, 0xe8, 0x20, 0x2b, 0x43, 0x7f, 0xde, 0x82, 0xb3, 0xd2, 0x36, 0x52, 0x6d, 0xd1, 0x5e, 0x5f, - 0xf2, 0xbd, 0x30, 0x0a, 0x78, 0x60, 0x83, 0x47, 0xf3, 0x1f, 0xfb, 0x6f, 0xe4, 0x14, 0x52, 0x7a, - 0xe0, 0xb3, 0x79, 0x14, 0x21, 0xce, 0xaf, 0x11, 0x2d, 0xc1, 0x64, 0x48, 0x22, 0xb9, 0x19, 0x2d, - 0x84, 0xab, 0xaf, 0x2f, 0xaf, 0x4f, 0x3f, 0xc6, 0xa3, 0x32, 0xd0, 0xc5, 0x50, 0x4d, 0x22, 0x71, - 0x9a, 0x1e, 0x5d, 0x86, 0x82, 0x1f, 0x4e, 0x3f, 0xde, 0x21, 0xa9, 0xaa, 0x5f, 0xbf, 0x51, 0xe5, - 0x0e, 0x6f, 0x37, 0xaa, 0xb8, 0xe0, 0x87, 0x32, 0x5d, 0x05, 0xbd, 0x8f, 0x85, 0xd3, 0x4f, 0x70, - 0xad, 0xa1, 0x4c, 0x57, 0xc1, 0x80, 0x38, 0xc6, 0xa3, 0x1d, 0x18, 0x0f, 0x8d, 0x7b, 0x6f, 0x38, - 0x7d, 0x91, 0xf5, 0xd4, 0x13, 0x79, 0x83, 0x66, 0x50, 0x6b, 0xd1, 0xe6, 0x4d, 0x2e, 0x38, 0xc9, - 0x96, 0xaf, 0x2e, 0xed, 0x82, 0x1f, 0x4e, 0x3f, 0xd9, 0x65, 0x75, 0x69, 0xc4, 0xfa, 0xea, 0xd2, - 0x79, 0xe0, 0x04, 0xcf, 0x99, 0xef, 0x84, 0xc9, 0x94, 0xb8, 0x74, 0x94, 0x4c, 0x4c, 0x33, 0xbb, - 0x30, 0x6a, 
0x4c, 0xc9, 0x07, 0xea, 0x58, 0xf0, 0x2f, 0x06, 0xa1, 0xa4, 0x8c, 0xce, 0x68, 0xde, - 0xf4, 0x25, 0x38, 0x9b, 0xf4, 0x25, 0x18, 0xaa, 0xf8, 0x75, 0xc3, 0x7d, 0x60, 0x23, 0x23, 0x76, - 0x5f, 0xde, 0x06, 0xd8, 0xfb, 0x9b, 0x06, 0x4d, 0x93, 0x5f, 0xec, 0xd9, 0x29, 0xa1, 0xaf, 0xa3, - 0x71, 0xe0, 0x0a, 0x4c, 0x7a, 0x3e, 0x93, 0xd1, 0x49, 0x5d, 0x0a, 0x60, 0x4c, 0xce, 0x2a, 0xe9, - 0xc1, 0x70, 0x12, 0x04, 0x38, 0x5d, 0x86, 0x56, 0xc8, 0x05, 0xa5, 0xa4, 0x35, 0x82, 0xcb, 0x51, - 0x58, 0x60, 0xd1, 0x63, 0xd0, 0xdf, 0xf2, 0xeb, 0xe5, 0x8a, 0x90, 0xcf, 0xb5, 0x88, 0xb1, 0xf5, - 0x72, 0x05, 0x73, 0x1c, 0x5a, 0x80, 0x01, 0xf6, 0x23, 0x9c, 0x1e, 0xc9, 0x8f, 0x7a, 0xc2, 0x4a, - 0x68, 0x79, 0xae, 0x58, 0x01, 0x2c, 0x0a, 0x32, 0xad, 0x28, 0xbd, 0xd4, 0x30, 0xad, 0xe8, 0xe0, - 0x7d, 0x6a, 0x45, 0x25, 0x03, 0x1c, 0xf3, 0x42, 0x77, 0xe1, 0xb4, 0x71, 0x91, 0xe4, 0x53, 0x84, - 0x84, 0x22, 0xf2, 0xc2, 0x63, 0x1d, 0x6f, 0x90, 0xc2, 0x89, 0xe1, 0x9c, 0x68, 0xf4, 0xe9, 0x72, - 0x16, 0x27, 0x9c, 0x5d, 0x01, 0x6a, 0xc0, 0x64, 0x2d, 0x55, 0xeb, 0x50, 0xef, 0xb5, 0xaa, 0x01, - 0x4d, 0xd7, 0x98, 0x66, 0x8c, 0x5e, 0x81, 0xa1, 0x37, 0xfd, 0x90, 0x9d, 0x6d, 0xe2, 0x4e, 0x21, - 0x9f, 0xed, 0x0f, 0xbd, 0x7e, 0xa3, 0xca, 0xe0, 0x87, 0x07, 0xb3, 0xc3, 0x15, 0xbf, 0x2e, 0xff, - 0x62, 0x55, 0x00, 0x7d, 0xbf, 0x05, 0x33, 0xe9, 0x9b, 0xaa, 0x6a, 0xf4, 0x68, 0xef, 0x8d, 0xb6, - 0x45, 0xa5, 0x33, 0x2b, 0xb9, 0xec, 0x70, 0x87, 0xaa, 0xec, 0x5f, 0xb4, 0x98, 0x6e, 0x55, 0x18, - 0x07, 0x49, 0xd8, 0x6e, 0x9c, 0x44, 0x7a, 0xdf, 0x15, 0xc3, 0x6e, 0x79, 0xdf, 0x4e, 0x2d, 0xff, - 0xc4, 0x62, 0x4e, 0x2d, 0x27, 0xf8, 0x7a, 0xe5, 0x75, 0x18, 0x8a, 0x64, 0xda, 0xe5, 0x0e, 0x19, - 0x89, 0xb5, 0x46, 0x31, 0xc7, 0x1e, 0x25, 0xe1, 0xab, 0x0c, 0xcb, 0x8a, 0x8d, 0xfd, 0x0f, 0xf8, - 0x08, 0x48, 0xcc, 0x09, 0x98, 0x87, 0x96, 0x4d, 0xf3, 0xd0, 0x6c, 0x97, 0x2f, 0xc8, 0x31, 0x13, - 0xfd, 0x7d, 0xb3, 0xdd, 0x4c, 0xb3, 0xf5, 0x4e, 0xf7, 0xa6, 0xb2, 0x3f, 0x6f, 0x01, 0xc4, 0x01, - 0xb9, 0x7b, 0x48, 0xac, 0xf7, 0x12, 0x95, 0xe9, 0xfd, 0xc8, 0xaf, 0xf9, 0x0d, 0x61, 0xfc, 0x7c, - 0x24, 0xb6, 0x50, 0x71, 0xf8, 0xa1, 0xf6, 0x1b, 0x2b, 0x6a, 0x34, 0x2b, 0xc3, 0xff, 0x15, 0x63, - 0x9b, 0xa9, 0x11, 0xfa, 0xef, 0x8b, 0x16, 0x9c, 0xca, 0x72, 0x85, 0xa6, 0x37, 0x44, 0xae, 0xe3, - 0x53, 0x9e, 0x6e, 0x6a, 0x34, 0x6f, 0x09, 0x38, 0x56, 0x14, 0x3d, 0x67, 0x2c, 0x3c, 0x5a, 0x24, - 0xec, 0x1b, 0x30, 0x5a, 0x09, 0x88, 0x76, 0xb8, 0xbe, 0xca, 0x43, 0x4a, 0xf0, 0xf6, 0x3c, 0x73, - 0xe4, 0x70, 0x12, 0xf6, 0x97, 0x0b, 0x70, 0x8a, 0x3b, 0x8c, 0x2c, 0xec, 0xf9, 0x6e, 0xbd, 0xe2, - 0xd7, 0xc5, 0x83, 0xb7, 0x8f, 0xc1, 0x48, 0x4b, 0x53, 0xcc, 0x76, 0x8a, 0xea, 0xaa, 0x2b, 0x70, - 0x63, 0x55, 0x92, 0x0e, 0xc5, 0x06, 0x2f, 0x54, 0x87, 0x11, 0xb2, 0xe7, 0xd6, 0x94, 0xd7, 0x41, - 0xe1, 0xc8, 0x07, 0x9d, 0xaa, 0x65, 0x45, 0xe3, 0x83, 0x0d, 0xae, 0x0f, 0x20, 0x8f, 0xb8, 0xfd, - 0xa3, 0x16, 0x3c, 0x94, 0x13, 0x03, 0x96, 0x56, 0x77, 0x87, 0xb9, 0xe6, 0x88, 0x69, 0xab, 0xaa, - 0xe3, 0x0e, 0x3b, 0x58, 0x60, 0xd1, 0x47, 0x00, 0xb8, 0xc3, 0x0d, 0xf1, 0x6a, 0x5d, 0x83, 0x65, - 0x1a, 0x71, 0xfe, 0xb4, 0x90, 0x6d, 0xb2, 0x3c, 0xd6, 0x78, 0xd9, 0x5f, 0xec, 0x83, 0x7e, 0xe6, - 0xe0, 0x81, 0x2a, 0x30, 0xb8, 0xc3, 0xb3, 0xfa, 0x74, 0x1c, 0x37, 0x4a, 0x2b, 0x13, 0x05, 0xc5, - 0xe3, 0xa6, 0x41, 0xb1, 0x64, 0x83, 0xd6, 0x60, 0x8a, 0x27, 0x57, 0x6a, 0x2c, 0x93, 0x86, 0xb3, - 0x2f, 0x75, 0x9e, 0x3c, 0x13, 0xb0, 0xd2, 0xfd, 0x96, 0xd3, 0x24, 0x38, 0xab, 0x1c, 0x7a, 0x15, - 0xc6, 0xe8, 0x1d, 0xd4, 0x6f, 0x47, 0x92, 0x13, 0x4f, 0xab, 0xa4, 0xc4, 0xf2, 0x0d, 0x03, 0x8b, - 0x13, 0xd4, 0xe8, 0x15, 0x18, 0x6d, 
0xa5, 0xb4, 0xbb, 0xfd, 0xb1, 0x1a, 0xc4, 0xd4, 0xe8, 0x9a, - 0xb4, 0xcc, 0x1b, 0xba, 0xcd, 0x7c, 0xbf, 0x37, 0x76, 0x02, 0x12, 0xee, 0xf8, 0x8d, 0x3a, 0x13, - 0xff, 0xfa, 0x35, 0x6f, 0xe8, 0x04, 0x1e, 0xa7, 0x4a, 0x50, 0x2e, 0x5b, 0x8e, 0xdb, 0x68, 0x07, - 0x24, 0xe6, 0x32, 0x60, 0x72, 0x59, 0x4d, 0xe0, 0x71, 0xaa, 0x44, 0x77, 0xb5, 0xf5, 0xe0, 0xf1, - 0xa8, 0xad, 0xed, 0x9f, 0x2a, 0x80, 0x31, 0xb4, 0xdf, 0xbe, 0xe9, 0x9e, 0xe8, 0x97, 0x6d, 0x07, - 0xad, 0x9a, 0x70, 0x66, 0xca, 0xfc, 0xb2, 0x38, 0x8b, 0x2b, 0xff, 0x32, 0xfa, 0x1f, 0xb3, 0x52, - 0x74, 0x8d, 0x9f, 0xae, 0x04, 0x3e, 0x3d, 0xe4, 0x64, 0xd0, 0x31, 0xf5, 0xe8, 0x60, 0x50, 0x3e, - 0xc8, 0xee, 0x10, 0x9e, 0x53, 0xb8, 0x65, 0x73, 0x0e, 0x86, 0xdf, 0x4f, 0x55, 0x44, 0x46, 0x90, - 0x5c, 0xd0, 0x65, 0x18, 0x16, 0x39, 0x7c, 0x98, 0x6f, 0x3c, 0x5f, 0x4c, 0xcc, 0x4f, 0x69, 0x39, - 0x06, 0x63, 0x9d, 0xc6, 0xfe, 0x81, 0x02, 0x4c, 0x65, 0x3c, 0x6e, 0xe2, 0xc7, 0xc8, 0xb6, 0x1b, - 0x46, 0x2a, 0x51, 0xac, 0x76, 0x8c, 0x70, 0x38, 0x56, 0x14, 0x74, 0xaf, 0xe2, 0x07, 0x55, 0xf2, - 0x70, 0x12, 0x8f, 0x07, 0x04, 0xf6, 0x88, 0x29, 0x57, 0x2f, 0x40, 0x5f, 0x3b, 0x24, 0x32, 0xb0, - 0xae, 0x3a, 0xb6, 0x99, 0x4d, 0x97, 0x61, 0xe8, 0x35, 0x6a, 0x5b, 0x99, 0x47, 0xb5, 0x6b, 0x14, - 0x37, 0x90, 0x72, 0x1c, 0x6d, 0x5c, 0x44, 0x3c, 0xc7, 0x8b, 0xc4, 0x65, 0x2b, 0x8e, 0x10, 0xc9, - 0xa0, 0x58, 0x60, 0xed, 0x2f, 0x14, 0xe1, 0x6c, 0xee, 0x73, 0x47, 0xda, 0xf4, 0xa6, 0xef, 0xb9, - 0x91, 0xaf, 0x1c, 0xc0, 0x78, 0x54, 0x48, 0xd2, 0xda, 0x59, 0x13, 0x70, 0xac, 0x28, 0xd0, 0x45, - 0xe8, 0x67, 0x1a, 0xe1, 0x54, 0xca, 0xdc, 0xc5, 0x65, 0x1e, 0x26, 0x8c, 0xa3, 0x7b, 0xce, 0x72, - 0xfe, 0x18, 0x95, 0x60, 0xfc, 0x46, 0xf2, 0x40, 0xa1, 0xcd, 0xf5, 0xfd, 0x06, 0x66, 0x48, 0xf4, - 0x84, 0xe8, 0xaf, 0x84, 0xc7, 0x13, 0x76, 0xea, 0x7e, 0xa8, 0x75, 0xda, 0x53, 0x30, 0xb8, 0x4b, - 0xf6, 0x03, 0xd7, 0xdb, 0x4e, 0x7a, 0xc2, 0x5d, 0xe3, 0x60, 0x2c, 0xf1, 0x66, 0x8e, 0xc7, 0xc1, - 0xe3, 0x4e, 0x4f, 0x3e, 0xd4, 0x55, 0x3c, 0xf9, 0xa1, 0x22, 0x8c, 0xe3, 0xc5, 0xe5, 0x77, 0x07, - 0xe2, 0x66, 0x7a, 0x20, 0x8e, 0x3b, 0x3d, 0x79, 0xf7, 0xd1, 0xf8, 0x39, 0x0b, 0xc6, 0x59, 0x26, - 0x21, 0x11, 0xd4, 0xc0, 0xf5, 0xbd, 0x13, 0xb8, 0x0a, 0x3c, 0x06, 0xfd, 0x01, 0xad, 0x34, 0x99, - 0x2b, 0x97, 0xb5, 0x04, 0x73, 0x1c, 0x7a, 0x04, 0xfa, 0x58, 0x13, 0xe8, 0xe0, 0x8d, 0xf0, 0x2d, - 0x78, 0xd9, 0x89, 0x1c, 0xcc, 0xa0, 0x2c, 0x48, 0x16, 0x26, 0xad, 0x86, 0xcb, 0x1b, 0x1d, 0xdb, - 0xeb, 0xdf, 0x19, 0x81, 0x10, 0x32, 0x9b, 0xf6, 0xf6, 0x82, 0x64, 0x65, 0xb3, 0xec, 0x7c, 0xcd, - 0xfe, 0xc3, 0x02, 0x9c, 0xcf, 0x2c, 0xd7, 0x73, 0x90, 0xac, 0xce, 0xa5, 0x1f, 0x64, 0xae, 0x98, - 0xe2, 0x09, 0xfa, 0x19, 0xf7, 0xf5, 0x2a, 0xfd, 0xf7, 0xf7, 0x10, 0xbb, 0x2a, 0xb3, 0xcb, 0xde, - 0x21, 0xb1, 0xab, 0x32, 0xdb, 0x96, 0xa3, 0x26, 0xf8, 0xd3, 0x42, 0xce, 0xb7, 0x30, 0x85, 0xc1, - 0x25, 0xba, 0xcf, 0x30, 0x64, 0x28, 0x2f, 0xe1, 0x7c, 0x8f, 0xe1, 0x30, 0xac, 0xb0, 0x68, 0x01, - 0xc6, 0x9b, 0xae, 0x47, 0x37, 0x9f, 0x7d, 0x53, 0x14, 0x57, 0x8a, 0xfc, 0x35, 0x13, 0x8d, 0x93, - 0xf4, 0xc8, 0xd5, 0xe2, 0x5a, 0xf1, 0xaf, 0x7b, 0xe5, 0x48, 0xab, 0x6e, 0xce, 0xf4, 0x65, 0x50, - 0xbd, 0x98, 0x11, 0xe3, 0x6a, 0x4d, 0xd3, 0x13, 0x15, 0x7b, 0xd7, 0x13, 0x8d, 0x64, 0xeb, 0x88, - 0x66, 0x5e, 0x81, 0xd1, 0xfb, 0x36, 0x0c, 0xd8, 0x5f, 0x2f, 0xc2, 0xc3, 0x1d, 0x96, 0x3d, 0xdf, - 0xeb, 0x8d, 0x31, 0xd0, 0xf6, 0xfa, 0xd4, 0x38, 0x54, 0xe0, 0xd4, 0x56, 0xbb, 0xd1, 0xd8, 0x67, - 0xcf, 0x6f, 0x48, 0x5d, 0x52, 0x08, 0x99, 0x52, 0x2a, 0x47, 0x4e, 0xad, 0x66, 0xd0, 0xe0, 0xcc, - 0x92, 0xf4, 0x8a, 0x45, 0x4f, 0x92, 0x7d, 0xc5, 0x2a, 0x71, 
0xc5, 0xc2, 0x3a, 0x12, 0x9b, 0xb4, - 0xe8, 0x0a, 0x4c, 0x3a, 0x7b, 0x8e, 0xcb, 0x83, 0x83, 0x4b, 0x06, 0xfc, 0x8e, 0xa5, 0xf4, 0xb9, - 0x0b, 0x49, 0x02, 0x9c, 0x2e, 0x83, 0x5e, 0x03, 0xe4, 0x6f, 0x32, 0x27, 0xfd, 0xfa, 0x15, 0xe2, - 0x09, 0x93, 0x33, 0x1b, 0xbb, 0x62, 0xbc, 0x25, 0xdc, 0x48, 0x51, 0xe0, 0x8c, 0x52, 0x89, 0x20, - 0x4e, 0x03, 0xf9, 0x41, 0x9c, 0x3a, 0xef, 0x8b, 0x5d, 0xd3, 0x14, 0x5d, 0x86, 0xd1, 0x23, 0xba, - 0x9e, 0xda, 0xff, 0xc1, 0xa2, 0x27, 0x1e, 0x2f, 0x63, 0x46, 0x48, 0x7d, 0x85, 0xf9, 0xc6, 0x72, - 0xf5, 0xb0, 0x16, 0x25, 0xe7, 0xb4, 0xe6, 0x1b, 0x1b, 0x23, 0xb1, 0x49, 0xcb, 0xe7, 0x90, 0xe6, - 0xd3, 0x6a, 0xdc, 0x0a, 0x44, 0x18, 0x37, 0x45, 0x81, 0x3e, 0x0a, 0x83, 0x75, 0x77, 0xcf, 0x0d, - 0x85, 0x72, 0xec, 0xc8, 0x96, 0xa8, 0x78, 0xeb, 0x5c, 0xe6, 0x6c, 0xb0, 0xe4, 0x67, 0xff, 0x50, - 0x21, 0xee, 0x93, 0xd7, 0xdb, 0x7e, 0xe4, 0x9c, 0xc0, 0x49, 0x7e, 0xc5, 0x38, 0xc9, 0x9f, 0xe8, - 0x14, 0xcb, 0x8e, 0x35, 0x29, 0xf7, 0x04, 0xbf, 0x91, 0x38, 0xc1, 0x9f, 0xec, 0xce, 0xaa, 0xf3, - 0xc9, 0xfd, 0x0f, 0x2d, 0x98, 0x34, 0xe8, 0x4f, 0xe0, 0x00, 0x59, 0x35, 0x0f, 0x90, 0x47, 0xbb, - 0x7e, 0x43, 0xce, 0xc1, 0xf1, 0xbd, 0xc5, 0x44, 0xdb, 0xd9, 0x81, 0xf1, 0x26, 0xf4, 0xed, 0x38, - 0x41, 0xbd, 0x53, 0xee, 0x8e, 0x54, 0xa1, 0xb9, 0xab, 0x4e, 0x20, 0xcc, 0xf4, 0xcf, 0xc8, 0x5e, - 0xa7, 0xa0, 0xae, 0x26, 0x7a, 0x56, 0x15, 0x7a, 0x09, 0x06, 0xc2, 0x9a, 0xdf, 0x52, 0xef, 0x75, - 0x2e, 0xb0, 0x8e, 0x66, 0x90, 0xc3, 0x83, 0x59, 0x64, 0x56, 0x47, 0xc1, 0x58, 0xd0, 0xa3, 0x8f, - 0xc1, 0x28, 0xfb, 0xa5, 0x7c, 0xe6, 0x8a, 0xf9, 0x1a, 0x8c, 0xaa, 0x4e, 0xc8, 0x1d, 0x4a, 0x0d, - 0x10, 0x36, 0x59, 0xcd, 0x6c, 0x43, 0x49, 0x7d, 0xd6, 0x03, 0x35, 0xf5, 0xfe, 0xbb, 0x22, 0x4c, - 0x65, 0xcc, 0x39, 0x14, 0x1a, 0x23, 0x71, 0xb9, 0xc7, 0xa9, 0xfa, 0x36, 0xc7, 0x22, 0x64, 0x17, - 0xa8, 0xba, 0x98, 0x5b, 0x3d, 0x57, 0x7a, 0x33, 0x24, 0xc9, 0x4a, 0x29, 0xa8, 0x7b, 0xa5, 0xb4, - 0xb2, 0x13, 0xeb, 0x6a, 0x5a, 0x91, 0x6a, 0xe9, 0x03, 0x1d, 0xd3, 0x5f, 0xee, 0x83, 0x53, 0x59, - 0xe1, 0x35, 0xd1, 0x67, 0x12, 0x99, 0x63, 0x5f, 0xe8, 0x35, 0x30, 0x27, 0x4f, 0x27, 0x2b, 0xc2, - 0xfe, 0xcd, 0x99, 0xb9, 0x64, 0xbb, 0x76, 0xb3, 0xa8, 0x93, 0x05, 0x1e, 0x09, 0x78, 0xc6, 0x5f, - 0xb9, 0x7d, 0xbc, 0xbf, 0xe7, 0x06, 0x88, 0x54, 0xc1, 0x61, 0xc2, 0x1f, 0x47, 0x82, 0xbb, 0xfb, - 0xe3, 0xc8, 0x9a, 0x51, 0x19, 0x06, 0x6a, 0xdc, 0xd1, 0xa3, 0xd8, 0x7d, 0x0b, 0xe3, 0x5e, 0x1e, - 0x6a, 0x03, 0x16, 0xde, 0x1d, 0x82, 0xc1, 0x8c, 0x0b, 0xc3, 0x5a, 0xc7, 0x3c, 0xd0, 0xc9, 0xb3, - 0x4b, 0x0f, 0x3e, 0xad, 0x0b, 0x1e, 0xe8, 0x04, 0xfa, 0x51, 0x0b, 0x12, 0xaf, 0x3d, 0x94, 0x52, - 0xce, 0xca, 0x55, 0xca, 0x5d, 0x80, 0xbe, 0xc0, 0x6f, 0x90, 0x64, 0xb6, 0x56, 0xec, 0x37, 0x08, - 0x66, 0x18, 0x4a, 0x11, 0xc5, 0xaa, 0x96, 0x11, 0xfd, 0x1a, 0x29, 0x2e, 0x88, 0x8f, 0x41, 0x7f, - 0x83, 0xec, 0x91, 0x46, 0x32, 0xa9, 0xd6, 0x75, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x5c, 0x1f, 0x9c, - 0xeb, 0x18, 0x05, 0x88, 0x5e, 0xc6, 0xb6, 0x9d, 0x88, 0xdc, 0x71, 0xf6, 0x93, 0xd9, 0x6f, 0xae, - 0x70, 0x30, 0x96, 0x78, 0xf6, 0xf4, 0x90, 0x07, 0xb1, 0x4f, 0xa8, 0x30, 0x45, 0xec, 0x7a, 0x81, - 0x35, 0x55, 0x62, 0xc5, 0xe3, 0x50, 0x89, 0x3d, 0x07, 0x10, 0x86, 0x0d, 0xee, 0x13, 0x57, 0x17, - 0x6f, 0x1a, 0xe3, 0x64, 0x07, 0xd5, 0xeb, 0x02, 0x83, 0x35, 0x2a, 0xb4, 0x0c, 0x13, 0xad, 0xc0, - 0x8f, 0xb8, 0x46, 0x78, 0x99, 0xbb, 0x8d, 0xf6, 0x9b, 0x01, 0x58, 0x2a, 0x09, 0x3c, 0x4e, 0x95, - 0x40, 0x2f, 0xc2, 0xb0, 0x08, 0xca, 0x52, 0xf1, 0xfd, 0x86, 0x50, 0x42, 0x29, 0x4f, 0xca, 0x6a, - 0x8c, 0xc2, 0x3a, 0x9d, 0x56, 0x8c, 0xa9, 0x99, 0x07, 0x33, 0x8b, 0x71, 0x55, 0xb3, 
0x46, 0x97, - 0x88, 0xda, 0x3b, 0xd4, 0x53, 0xd4, 0xde, 0x58, 0x2d, 0x57, 0xea, 0xd9, 0xea, 0x09, 0x5d, 0x15, - 0x59, 0x5f, 0xe9, 0x83, 0x29, 0x31, 0x71, 0x1e, 0xf4, 0x74, 0xb9, 0x99, 0x9e, 0x2e, 0xc7, 0xa1, - 0xb8, 0x7b, 0x77, 0xce, 0x9c, 0xf4, 0x9c, 0xf9, 0x61, 0x0b, 0x4c, 0x49, 0x0d, 0xfd, 0x99, 0xdc, - 0xf4, 0x61, 0x2f, 0xe6, 0x4a, 0x7e, 0xca, 0x6b, 0xf0, 0x6d, 0x26, 0x12, 0xb3, 0xff, 0xbd, 0x05, - 0x8f, 0x76, 0xe5, 0x88, 0x56, 0xa0, 0xc4, 0xc4, 0x49, 0xed, 0xa2, 0xf7, 0xa4, 0x72, 0x2b, 0x97, - 0x88, 0x1c, 0xe9, 0x36, 0x2e, 0x89, 0x56, 0x52, 0x79, 0xda, 0x9e, 0xca, 0xc8, 0xd3, 0x76, 0xda, - 0xe8, 0x9e, 0xfb, 0x4c, 0xd4, 0xf6, 0x83, 0xf4, 0xc4, 0x31, 0x9e, 0x74, 0xa1, 0xf7, 0x1b, 0x4a, - 0x47, 0x3b, 0xa1, 0x74, 0x44, 0x26, 0xb5, 0x76, 0x86, 0x7c, 0x18, 0x26, 0x58, 0xb4, 0x36, 0xf6, - 0xc8, 0x41, 0x3c, 0x36, 0x2b, 0xc4, 0x8e, 0xcc, 0xd7, 0x13, 0x38, 0x9c, 0xa2, 0xb6, 0xff, 0xa0, - 0x08, 0x03, 0x7c, 0xf9, 0x9d, 0xc0, 0xf5, 0xf2, 0x69, 0x28, 0xb9, 0xcd, 0x66, 0x9b, 0xa7, 0xde, - 0xea, 0x8f, 0xdd, 0x62, 0xcb, 0x12, 0x88, 0x63, 0x3c, 0x5a, 0x15, 0xfa, 0xee, 0x0e, 0x01, 0x61, - 0x79, 0xc3, 0xe7, 0x96, 0x9d, 0xc8, 0xe1, 0xb2, 0x92, 0x3a, 0x67, 0x63, 0xcd, 0x38, 0xfa, 0x24, - 0x40, 0x18, 0x05, 0xae, 0xb7, 0x4d, 0x61, 0x22, 0x0e, 0xf5, 0x7b, 0x3b, 0x70, 0xab, 0x2a, 0x62, - 0xce, 0x33, 0xde, 0x73, 0x14, 0x02, 0x6b, 0x1c, 0xd1, 0x9c, 0x71, 0xd2, 0xcf, 0x24, 0xc6, 0x0e, - 0x38, 0xd7, 0x78, 0xcc, 0x66, 0x3e, 0x00, 0x25, 0xc5, 0xbc, 0x9b, 0xf6, 0x6b, 0x44, 0x17, 0x8b, - 0x3e, 0x04, 0xe3, 0x89, 0xb6, 0x1d, 0x49, 0x79, 0xf6, 0xf3, 0x16, 0x8c, 0xf3, 0xc6, 0xac, 0x78, - 0x7b, 0xe2, 0x34, 0x78, 0x0b, 0x4e, 0x35, 0x32, 0x76, 0x65, 0x31, 0xfc, 0xbd, 0xef, 0xe2, 0x4a, - 0x59, 0x96, 0x85, 0xc5, 0x99, 0x75, 0xa0, 0x4b, 0x74, 0xc5, 0xd1, 0x5d, 0xd7, 0x69, 0x88, 0xb7, - 0xf5, 0x23, 0x7c, 0xb5, 0x71, 0x18, 0x56, 0x58, 0xfb, 0xb7, 0x2d, 0x98, 0xe4, 0x2d, 0xbf, 0x46, - 0xf6, 0xd5, 0xde, 0xf4, 0xcd, 0x6c, 0xbb, 0x48, 0xfa, 0x58, 0xc8, 0x49, 0xfa, 0xa8, 0x7f, 0x5a, - 0xb1, 0xe3, 0xa7, 0x7d, 0xd9, 0x02, 0x31, 0x43, 0x4e, 0x40, 0x9f, 0xf1, 0x9d, 0xa6, 0x3e, 0x63, - 0x26, 0x7f, 0x11, 0xe4, 0x28, 0x32, 0xfe, 0xc4, 0x82, 0x09, 0x4e, 0x10, 0xdb, 0xea, 0xbf, 0xa9, - 0xe3, 0xd0, 0x4b, 0x6a, 0xf8, 0x6b, 0x64, 0x7f, 0xc3, 0xaf, 0x38, 0xd1, 0x4e, 0xf6, 0x47, 0x19, - 0x83, 0xd5, 0xd7, 0x71, 0xb0, 0xea, 0x72, 0x01, 0x19, 0x39, 0x91, 0xba, 0xbc, 0x90, 0x3f, 0x6a, - 0x4e, 0x24, 0xfb, 0x1b, 0x16, 0x20, 0x5e, 0x8d, 0x21, 0xb8, 0x51, 0x71, 0x88, 0x41, 0xb5, 0x83, - 0x2e, 0xde, 0x9a, 0x14, 0x06, 0x6b, 0x54, 0xc7, 0xd2, 0x3d, 0x09, 0x87, 0x8b, 0x62, 0x77, 0x87, - 0x8b, 0x23, 0xf4, 0xe8, 0xbf, 0x1c, 0x80, 0xe4, 0xb3, 0x36, 0x74, 0x0b, 0x46, 0x6a, 0x4e, 0xcb, - 0xd9, 0x74, 0x1b, 0x6e, 0xe4, 0x92, 0xb0, 0x93, 0x37, 0xd6, 0x92, 0x46, 0x27, 0x4c, 0xe4, 0x1a, - 0x04, 0x1b, 0x7c, 0xd0, 0x1c, 0x40, 0x2b, 0x70, 0xf7, 0xdc, 0x06, 0xd9, 0x66, 0x6a, 0x17, 0x16, - 0xcd, 0x83, 0xbb, 0x86, 0x49, 0x28, 0xd6, 0x28, 0x32, 0x62, 0x08, 0x14, 0x1f, 0x70, 0x0c, 0x01, - 0x38, 0xb1, 0x18, 0x02, 0x7d, 0x47, 0x8a, 0x21, 0x30, 0x74, 0xe4, 0x18, 0x02, 0xfd, 0x3d, 0xc5, - 0x10, 0xc0, 0x70, 0x46, 0xca, 0x9e, 0xf4, 0xff, 0xaa, 0xdb, 0x20, 0xe2, 0xc2, 0xc1, 0x43, 0x90, - 0xcc, 0xdc, 0x3b, 0x98, 0x3d, 0x83, 0x33, 0x29, 0x70, 0x4e, 0x49, 0xf4, 0x11, 0x98, 0x76, 0x1a, - 0x0d, 0xff, 0x8e, 0x1a, 0xd4, 0x95, 0xb0, 0xe6, 0x34, 0xb8, 0x09, 0x64, 0x90, 0x71, 0x7d, 0xe4, - 0xde, 0xc1, 0xec, 0xf4, 0x42, 0x0e, 0x0d, 0xce, 0x2d, 0x8d, 0x3e, 0x08, 0xa5, 0x56, 0xe0, 0xd7, - 0xd6, 0xb4, 0xb7, 0xb7, 0xe7, 0x69, 0x07, 0x56, 0x24, 0xf0, 0xf0, 0x60, 0x76, 0x54, 0xfd, 0x61, - 0x07, 0x7e, 
0x5c, 0x20, 0x23, 0x28, 0xc0, 0xf0, 0xb1, 0x06, 0x05, 0xd8, 0x85, 0xa9, 0x2a, 0x09, - 0x5c, 0xa7, 0xe1, 0xbe, 0x45, 0xe5, 0x65, 0xb9, 0x3f, 0x6d, 0x40, 0x29, 0x48, 0xec, 0xc8, 0x3d, - 0x05, 0x69, 0xd5, 0x92, 0xd3, 0xc8, 0x1d, 0x38, 0x66, 0x64, 0xff, 0x6f, 0x0b, 0x06, 0xc5, 0x33, - 0xb6, 0x13, 0x90, 0x1a, 0x17, 0x0c, 0xa3, 0xc4, 0x6c, 0x76, 0x87, 0xb1, 0xc6, 0xe4, 0x9a, 0x23, - 0xca, 0x09, 0x73, 0xc4, 0xa3, 0x9d, 0x98, 0x74, 0x36, 0x44, 0xfc, 0xd5, 0x22, 0x95, 0xde, 0x8d, - 0x07, 0xd5, 0x0f, 0xbe, 0x0b, 0xd6, 0x61, 0x30, 0x14, 0x0f, 0x7a, 0x0b, 0xf9, 0x2f, 0x2a, 0x92, - 0x83, 0x18, 0x7b, 0xd1, 0x89, 0x27, 0xbc, 0x92, 0x49, 0xe6, 0x4b, 0xe1, 0xe2, 0x03, 0x7c, 0x29, - 0xdc, 0xed, 0xc9, 0x79, 0xdf, 0x71, 0x3c, 0x39, 0xb7, 0xbf, 0xc6, 0x4e, 0x4e, 0x1d, 0x7e, 0x02, - 0x42, 0xd5, 0x15, 0xf3, 0x8c, 0xb5, 0x3b, 0xcc, 0x2c, 0xd1, 0xa8, 0x1c, 0xe1, 0xea, 0x67, 0x2d, - 0x38, 0x97, 0xf1, 0x55, 0x9a, 0xa4, 0xf5, 0x0c, 0x0c, 0x39, 0xed, 0xba, 0xab, 0xd6, 0xb2, 0x66, - 0x9a, 0x5c, 0x10, 0x70, 0xac, 0x28, 0xd0, 0x12, 0x4c, 0x92, 0xbb, 0x2d, 0x97, 0x1b, 0x72, 0x75, - 0xe7, 0xe3, 0x22, 0x7f, 0xfb, 0xb8, 0x92, 0x44, 0xe2, 0x34, 0xbd, 0x0a, 0x4e, 0x54, 0xcc, 0x0d, - 0x4e, 0xf4, 0xb7, 0x2d, 0x18, 0x56, 0x4f, 0x5a, 0x1f, 0x78, 0x6f, 0x7f, 0xd8, 0xec, 0xed, 0x87, - 0x3b, 0xf4, 0x76, 0x4e, 0x37, 0xff, 0x66, 0x41, 0xb5, 0xb7, 0xe2, 0x07, 0x51, 0x0f, 0x12, 0xdc, - 0xfd, 0x3f, 0x9c, 0xb8, 0x0c, 0xc3, 0x4e, 0xab, 0x25, 0x11, 0xd2, 0x03, 0x8e, 0x85, 0xdc, 0x8e, - 0xc1, 0x58, 0xa7, 0x51, 0xef, 0x38, 0x8a, 0xb9, 0xef, 0x38, 0xea, 0x00, 0x91, 0x13, 0x6c, 0x93, - 0x88, 0xc2, 0x84, 0xc3, 0x6e, 0xfe, 0x7e, 0xd3, 0x8e, 0xdc, 0xc6, 0x9c, 0xeb, 0x45, 0x61, 0x14, - 0xcc, 0x95, 0xbd, 0xe8, 0x46, 0xc0, 0xaf, 0x90, 0x5a, 0x78, 0x2f, 0xc5, 0x0b, 0x6b, 0x7c, 0x65, - 0xf8, 0x06, 0x56, 0x47, 0xbf, 0xe9, 0x4a, 0xb1, 0x2e, 0xe0, 0x58, 0x51, 0xd8, 0x1f, 0x60, 0xa7, - 0x0f, 0xeb, 0xd3, 0xa3, 0x85, 0xb6, 0xfa, 0xf2, 0x88, 0x1a, 0x0d, 0x66, 0x14, 0x5d, 0xd6, 0x03, - 0x68, 0x75, 0xde, 0xec, 0x69, 0xc5, 0xfa, 0xab, 0xc2, 0x38, 0xca, 0x16, 0xfa, 0x78, 0xca, 0x3d, - 0xe6, 0xd9, 0x2e, 0xa7, 0xc6, 0x11, 0x1c, 0x62, 0x58, 0xfe, 0x1d, 0x96, 0x9d, 0xa4, 0x5c, 0x11, - 0xeb, 0x42, 0xcb, 0xbf, 0x23, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa3, 0x38, - 0x0e, 0xad, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0xe6, 0x85, 0x42, 0x81, 0xdb, 0x05, 0x1e, 0x4e, 0x28, - 0x14, 0x64, 0x77, 0x69, 0x5a, 0xa0, 0xcb, 0x30, 0xac, 0xb2, 0xad, 0x57, 0x78, 0xe6, 0x2b, 0x31, - 0xcd, 0x56, 0x62, 0x30, 0xd6, 0x69, 0xd0, 0x06, 0x8c, 0x87, 0x5c, 0xcf, 0xa6, 0x82, 0x83, 0x73, - 0x7d, 0xe5, 0x7b, 0xd5, 0x63, 0x62, 0x13, 0x7d, 0xc8, 0x40, 0x7c, 0x77, 0x92, 0x21, 0x16, 0x92, - 0x2c, 0xd0, 0xab, 0x30, 0xd6, 0xf0, 0x9d, 0xfa, 0xa2, 0xd3, 0x70, 0xbc, 0x1a, 0xeb, 0x9f, 0x21, - 0x33, 0x69, 0xef, 0x75, 0x03, 0x8b, 0x13, 0xd4, 0x54, 0x78, 0xd3, 0x21, 0x22, 0x44, 0x98, 0xe3, - 0x6d, 0x93, 0x50, 0xe4, 0xce, 0x66, 0xc2, 0xdb, 0xf5, 0x1c, 0x1a, 0x9c, 0x5b, 0x1a, 0xbd, 0x04, - 0x23, 0xf2, 0xf3, 0xb5, 0x88, 0x24, 0xf1, 0x93, 0x18, 0x0d, 0x87, 0x0d, 0x4a, 0x74, 0x07, 0x4e, - 0xcb, 0xff, 0x1b, 0x81, 0xb3, 0xb5, 0xe5, 0xd6, 0xc4, 0x33, 0x7d, 0xfe, 0x76, 0x76, 0x41, 0x3e, - 0xf0, 0x5c, 0xc9, 0x22, 0x3a, 0x3c, 0x98, 0xbd, 0x20, 0x7a, 0x2d, 0x13, 0xcf, 0x06, 0x31, 0x9b, - 0x3f, 0x5a, 0x83, 0xa9, 0x1d, 0xe2, 0x34, 0xa2, 0x9d, 0xa5, 0x1d, 0x52, 0xdb, 0x95, 0x8b, 0x8e, - 0xc5, 0x39, 0xd1, 0x9e, 0x8f, 0x5c, 0x4d, 0x93, 0xe0, 0xac, 0x72, 0xe8, 0x0d, 0x98, 0x6e, 0xb5, - 0x37, 0x1b, 0x6e, 0xb8, 0xb3, 0xee, 0x47, 0xcc, 0x11, 0x49, 0x25, 0x6f, 0x17, 0x01, 0x51, 0x54, - 0x24, 0x99, 0x4a, 0x0e, 0x1d, 0xce, 
0xe5, 0x80, 0xde, 0x82, 0xd3, 0x89, 0xc9, 0x20, 0x42, 0x42, - 0x8c, 0xe5, 0xa7, 0x07, 0xa9, 0x66, 0x15, 0x10, 0xd1, 0x55, 0xb2, 0x50, 0x38, 0xbb, 0x0a, 0xf4, - 0x32, 0x80, 0xdb, 0x5a, 0x75, 0x9a, 0x6e, 0x83, 0x5e, 0x17, 0xa7, 0xd8, 0x3c, 0xa1, 0x57, 0x07, - 0x28, 0x57, 0x24, 0x94, 0xee, 0xcf, 0xe2, 0xdf, 0x3e, 0xd6, 0xa8, 0xd1, 0x75, 0x18, 0x13, 0xff, - 0xf6, 0xc5, 0xb0, 0xf2, 0xc8, 0x24, 0x8f, 0xb3, 0xb0, 0x52, 0x15, 0x1d, 0x73, 0x98, 0x82, 0xe0, - 0x44, 0x59, 0xb4, 0x0d, 0xe7, 0x64, 0xaa, 0x37, 0x7d, 0x8e, 0xca, 0x31, 0x08, 0x59, 0x4e, 0x8e, - 0x21, 0xfe, 0x32, 0x65, 0xa1, 0x13, 0x21, 0xee, 0xcc, 0x87, 0x9e, 0xed, 0xfa, 0x54, 0xe7, 0x6f, - 0x77, 0x4f, 0x73, 0x2f, 0x27, 0x7a, 0xb6, 0x5f, 0x4f, 0x22, 0x71, 0x9a, 0x1e, 0x85, 0x70, 0xda, - 0xf5, 0xb2, 0x66, 0xf6, 0x19, 0xc6, 0xe8, 0x43, 0xfc, 0xd9, 0x72, 0xe7, 0x59, 0x9d, 0x89, 0xe7, - 0xb3, 0x3a, 0x93, 0xf7, 0xdb, 0xf3, 0xff, 0xfb, 0x2d, 0x8b, 0x96, 0xd6, 0xa4, 0x74, 0xf4, 0x29, - 0x18, 0xd1, 0x3f, 0x4c, 0x48, 0x1c, 0x17, 0xb3, 0x85, 0x58, 0x6d, 0x6f, 0xe0, 0x32, 0xbe, 0x5a, - 0xff, 0x3a, 0x0e, 0x1b, 0x1c, 0x51, 0x2d, 0xe3, 0x81, 0xff, 0x7c, 0x6f, 0x12, 0x4d, 0xef, 0xee, - 0x6f, 0x04, 0xb2, 0xa7, 0x3c, 0xba, 0x0e, 0x43, 0xb5, 0x86, 0x4b, 0xbc, 0xa8, 0x5c, 0xe9, 0x14, - 0xc2, 0x70, 0x49, 0xd0, 0x88, 0x35, 0x24, 0x52, 0x6c, 0x70, 0x18, 0x56, 0x1c, 0xec, 0x5f, 0x2d, - 0xc0, 0x6c, 0x97, 0x7c, 0x2d, 0x09, 0x73, 0x94, 0xd5, 0x93, 0x39, 0x6a, 0x01, 0xc6, 0xe3, 0x7f, - 0xba, 0xa6, 0x4b, 0x79, 0xb4, 0xde, 0x32, 0xd1, 0x38, 0x49, 0xdf, 0xf3, 0xe3, 0x04, 0xdd, 0xa2, - 0xd5, 0xd7, 0xf5, 0x79, 0x8d, 0x61, 0xc9, 0xee, 0xef, 0xfd, 0xfa, 0x9b, 0x6b, 0x95, 0xb4, 0xbf, - 0x56, 0x80, 0xd3, 0xaa, 0x0b, 0xbf, 0x7d, 0x3b, 0xee, 0x66, 0xba, 0xe3, 0x8e, 0xc1, 0xa6, 0x6b, - 0xdf, 0x80, 0x01, 0x1e, 0x93, 0xb1, 0x07, 0xb1, 0xfb, 0x31, 0x33, 0x52, 0xb3, 0x92, 0xf4, 0x8c, - 0x68, 0xcd, 0xdf, 0x6f, 0xc1, 0x78, 0xe2, 0x95, 0x1b, 0xc2, 0xda, 0x53, 0xe8, 0xfb, 0x11, 0x8d, - 0xb3, 0x84, 0xee, 0x0b, 0xd0, 0xb7, 0xe3, 0x87, 0x51, 0xd2, 0xe1, 0xe3, 0xaa, 0x1f, 0x46, 0x98, - 0x61, 0xec, 0xdf, 0xb1, 0xa0, 0x7f, 0xc3, 0x71, 0xbd, 0x48, 0x1a, 0x07, 0xac, 0x1c, 0xe3, 0x40, - 0x2f, 0xdf, 0x85, 0x5e, 0x84, 0x01, 0xb2, 0xb5, 0x45, 0x6a, 0x91, 0x18, 0x55, 0x19, 0x47, 0x62, - 0x60, 0x85, 0x41, 0xa9, 0x1c, 0xc8, 0x2a, 0xe3, 0x7f, 0xb1, 0x20, 0x46, 0xb7, 0xa1, 0x14, 0xb9, - 0x4d, 0xb2, 0x50, 0xaf, 0x0b, 0x93, 0xf9, 0x7d, 0xc4, 0xc2, 0xd8, 0x90, 0x0c, 0x70, 0xcc, 0xcb, - 0xfe, 0x42, 0x01, 0x20, 0x0e, 0x66, 0xd5, 0xed, 0x13, 0x17, 0x53, 0xc6, 0xd4, 0x8b, 0x19, 0xc6, - 0x54, 0x14, 0x33, 0xcc, 0xb0, 0xa4, 0xaa, 0x6e, 0x2a, 0xf6, 0xd4, 0x4d, 0x7d, 0x47, 0xe9, 0xa6, - 0x25, 0x98, 0x8c, 0x83, 0x71, 0x99, 0xb1, 0x08, 0xd9, 0xf1, 0xb9, 0x91, 0x44, 0xe2, 0x34, 0xbd, - 0x4d, 0xe0, 0x82, 0x8a, 0x49, 0x24, 0x4e, 0x34, 0xe6, 0x0f, 0xae, 0x1b, 0xa7, 0xbb, 0xf4, 0x53, - 0x6c, 0x2d, 0x2e, 0xe4, 0x5a, 0x8b, 0x7f, 0xc2, 0x82, 0x53, 0xc9, 0x7a, 0xd8, 0xe3, 0xe9, 0xcf, - 0x5b, 0x70, 0x9a, 0xd9, 0xcc, 0x59, 0xad, 0x69, 0x0b, 0xfd, 0x0b, 0x1d, 0xe3, 0x2c, 0xe5, 0xb4, - 0x38, 0x0e, 0x58, 0xb2, 0x96, 0xc5, 0x1a, 0x67, 0xd7, 0x68, 0xff, 0xaf, 0x3e, 0x98, 0xce, 0x0b, - 0xd0, 0xc4, 0x9e, 0x8b, 0x38, 0x77, 0xab, 0xbb, 0xe4, 0x8e, 0x70, 0xca, 0x8f, 0x9f, 0x8b, 0x70, - 0x30, 0x96, 0xf8, 0x64, 0x0a, 0x8e, 0x42, 0x8f, 0x29, 0x38, 0x76, 0x60, 0xf2, 0xce, 0x0e, 0xf1, - 0x6e, 0x7a, 0xa1, 0x13, 0xb9, 0xe1, 0x96, 0xcb, 0xec, 0xcb, 0x7c, 0xde, 0xc8, 0xbc, 0xbd, 0x93, - 0xb7, 0x93, 0x04, 0x87, 0x07, 0xb3, 0xe7, 0x0c, 0x40, 0xdc, 0x64, 0xbe, 0x91, 0xe0, 0x34, 0xd3, - 0x74, 0x06, 0x93, 0xbe, 0x07, 0x9c, 0xc1, 0xa4, 0xe9, 0x0a, 
0xaf, 0x14, 0xf9, 0x16, 0x80, 0xdd, - 0x1c, 0xd7, 0x14, 0x14, 0x6b, 0x14, 0xe8, 0x13, 0x80, 0xf4, 0x0c, 0x4d, 0x46, 0x7c, 0xcc, 0x67, - 0xef, 0x1d, 0xcc, 0xa2, 0xf5, 0x14, 0xf6, 0xf0, 0x60, 0x76, 0x8a, 0x42, 0xcb, 0x1e, 0xbd, 0x81, - 0xc6, 0x41, 0xc5, 0x32, 0x18, 0xa1, 0xdb, 0x30, 0x41, 0xa1, 0x6c, 0x45, 0xc9, 0xe0, 0x9b, 0xfc, - 0xd6, 0xf8, 0xf4, 0xbd, 0x83, 0xd9, 0x89, 0xf5, 0x04, 0x2e, 0x8f, 0x75, 0x8a, 0x09, 0x7a, 0x19, - 0xc6, 0xe2, 0x79, 0x75, 0x8d, 0xec, 0xf3, 0x60, 0x37, 0x25, 0xae, 0xf8, 0x5e, 0x33, 0x30, 0x38, - 0x41, 0x69, 0x7f, 0xde, 0x82, 0xb3, 0xb9, 0x59, 0xc4, 0xd1, 0x25, 0x18, 0x72, 0x5a, 0x2e, 0x37, - 0x63, 0x88, 0xa3, 0x86, 0xa9, 0xcb, 0x2a, 0x65, 0x6e, 0xc4, 0x50, 0x58, 0xba, 0xc3, 0xef, 0xba, - 0x5e, 0x3d, 0xb9, 0xc3, 0x5f, 0x73, 0xbd, 0x3a, 0x66, 0x18, 0x75, 0x64, 0x15, 0x73, 0x9f, 0x24, - 0x7c, 0x85, 0xae, 0xd5, 0x8c, 0x7c, 0xe3, 0x27, 0xdb, 0x0c, 0xf4, 0xb4, 0x6e, 0x72, 0x14, 0xde, - 0x85, 0xb9, 0xe6, 0xc6, 0xef, 0xb3, 0x40, 0x3c, 0x61, 0xee, 0xe1, 0x4c, 0xfe, 0x18, 0x8c, 0xec, - 0xa5, 0xb3, 0xd7, 0x5d, 0xc8, 0x7f, 0xd3, 0x2d, 0xa2, 0x7e, 0x2b, 0x41, 0xdb, 0xc8, 0x54, 0x67, - 0xf0, 0xb2, 0xeb, 0x20, 0xb0, 0xcb, 0x84, 0x19, 0x16, 0xba, 0xb7, 0xe6, 0x39, 0x80, 0x3a, 0xa3, - 0x65, 0x29, 0x6d, 0x0b, 0xa6, 0xc4, 0xb5, 0xac, 0x30, 0x58, 0xa3, 0xb2, 0xff, 0x75, 0x01, 0x86, - 0x65, 0xb6, 0xb4, 0xb6, 0xd7, 0x8b, 0xfa, 0xef, 0x48, 0xe9, 0x93, 0xd1, 0x3c, 0x94, 0x98, 0x7e, - 0xba, 0x12, 0x6b, 0x4d, 0x95, 0x76, 0x68, 0x4d, 0x22, 0x70, 0x4c, 0x43, 0x77, 0xc7, 0xb0, 0xbd, - 0xc9, 0xc8, 0x13, 0x0f, 0x6e, 0xab, 0x1c, 0x8c, 0x25, 0x1e, 0x7d, 0x04, 0x26, 0x78, 0xb9, 0xc0, - 0x6f, 0x39, 0xdb, 0xdc, 0xa6, 0xd5, 0xaf, 0xa2, 0x98, 0x4c, 0xac, 0x25, 0x70, 0x87, 0x07, 0xb3, - 0xa7, 0x92, 0x30, 0x66, 0xac, 0x4d, 0x71, 0x61, 0xae, 0x6b, 0xbc, 0x12, 0xba, 0xab, 0xa7, 0x3c, - 0xde, 0x62, 0x14, 0xd6, 0xe9, 0xec, 0x4f, 0x01, 0x4a, 0xe7, 0x8d, 0x43, 0xaf, 0x71, 0xd7, 0x67, - 0x37, 0x20, 0xf5, 0x4e, 0xc6, 0x5b, 0x3d, 0x56, 0x87, 0x7c, 0x2b, 0xc7, 0x4b, 0x61, 0x55, 0xde, - 0xfe, 0x0b, 0x45, 0x98, 0x48, 0x46, 0x07, 0x40, 0x57, 0x61, 0x80, 0x8b, 0x94, 0x82, 0x7d, 0x07, - 0xdf, 0x20, 0x2d, 0xa6, 0x00, 0x3b, 0x5c, 0x85, 0x54, 0x2a, 0xca, 0xa3, 0x37, 0x60, 0xb8, 0xee, - 0xdf, 0xf1, 0xee, 0x38, 0x41, 0x7d, 0xa1, 0x52, 0x16, 0xd3, 0x39, 0x53, 0x59, 0xb1, 0x1c, 0x93, - 0xe9, 0x71, 0x0a, 0x98, 0x1d, 0x3c, 0x46, 0x61, 0x9d, 0x1d, 0xda, 0x60, 0xc9, 0x26, 0xb6, 0xdc, - 0xed, 0x35, 0xa7, 0xd5, 0xe9, 0x1d, 0xcc, 0x92, 0x24, 0xd2, 0x38, 0x8f, 0x8a, 0x8c, 0x14, 0x1c, - 0x81, 0x63, 0x46, 0xe8, 0x33, 0x30, 0x15, 0xe6, 0x98, 0x50, 0xf2, 0xd2, 0x88, 0x76, 0xb2, 0x2a, - 0x2c, 0x3e, 0x74, 0xef, 0x60, 0x76, 0x2a, 0xcb, 0xd8, 0x92, 0x55, 0x8d, 0xfd, 0xc5, 0x53, 0x60, - 0x2c, 0x62, 0x23, 0xab, 0xb4, 0x75, 0x4c, 0x59, 0xa5, 0x31, 0x0c, 0x91, 0x66, 0x2b, 0xda, 0x5f, - 0x76, 0x03, 0x31, 0x26, 0x99, 0x3c, 0x57, 0x04, 0x4d, 0x9a, 0xa7, 0xc4, 0x60, 0xc5, 0x27, 0x3b, - 0xf5, 0x77, 0xf1, 0x9b, 0x98, 0xfa, 0xbb, 0xef, 0x04, 0x53, 0x7f, 0xaf, 0xc3, 0xe0, 0xb6, 0x1b, - 0x61, 0xd2, 0xf2, 0xc5, 0x65, 0x2e, 0x73, 0x1e, 0x5e, 0xe1, 0x24, 0xe9, 0x24, 0xb3, 0x02, 0x81, - 0x25, 0x13, 0xf4, 0x9a, 0x5a, 0x81, 0x03, 0xf9, 0x0a, 0x97, 0xb4, 0x13, 0x4b, 0xe6, 0x1a, 0x14, - 0x09, 0xbe, 0x07, 0xef, 0x37, 0xc1, 0xf7, 0xaa, 0x4c, 0xcb, 0x3d, 0x94, 0xff, 0x68, 0x8d, 0x65, - 0xdd, 0xee, 0x92, 0x8c, 0xfb, 0x96, 0x9e, 0xca, 0xbc, 0x94, 0xbf, 0x13, 0xa8, 0x2c, 0xe5, 0x3d, - 0x26, 0x30, 0xff, 0x3e, 0x0b, 0x4e, 0xb7, 0xb2, 0xb2, 0xfa, 0x0b, 0x7f, 0x8f, 0x17, 0x7b, 0xc9, - 0xfd, 0xca, 0x0a, 0x18, 0x15, 0x32, 0x3d, 0x69, 0x26, 0x19, 0xce, 0xae, 0x8e, 0x76, 
0x74, 0xb0, - 0x59, 0x17, 0x7e, 0x07, 0x8f, 0xe5, 0x64, 0x42, 0xef, 0x90, 0xff, 0x7c, 0x23, 0x23, 0xeb, 0xf6, - 0xe3, 0x79, 0x59, 0xb7, 0x7b, 0xce, 0xb5, 0xfd, 0x9a, 0xca, 0x81, 0x3e, 0x9a, 0x3f, 0x95, 0x78, - 0x86, 0xf3, 0xae, 0x99, 0xcf, 0x5f, 0x53, 0x99, 0xcf, 0x3b, 0x84, 0xd7, 0xe6, 0x79, 0xcd, 0xbb, - 0xe6, 0x3b, 0xd7, 0x72, 0x96, 0x8f, 0x1f, 0x4f, 0xce, 0x72, 0xe3, 0xa8, 0xe1, 0x69, 0xb3, 0x9f, - 0xee, 0x72, 0xd4, 0x18, 0x7c, 0x3b, 0x1f, 0x36, 0x3c, 0x3f, 0xfb, 0xe4, 0x7d, 0xe5, 0x67, 0xbf, - 0xa5, 0xe7, 0x3b, 0x47, 0x5d, 0x12, 0x7a, 0x53, 0xa2, 0x1e, 0xb3, 0x9c, 0xdf, 0xd2, 0x0f, 0xc0, - 0xa9, 0x7c, 0xbe, 0xea, 0x9c, 0x4b, 0xf3, 0xcd, 0x3c, 0x02, 0x53, 0xd9, 0xd3, 0x4f, 0x9d, 0x4c, - 0xf6, 0xf4, 0xd3, 0xc7, 0x9e, 0x3d, 0xfd, 0xcc, 0x09, 0x64, 0x4f, 0x7f, 0xe8, 0x04, 0xb3, 0xa7, - 0xdf, 0x62, 0x4e, 0x52, 0x3c, 0x10, 0x94, 0x08, 0x07, 0xfe, 0x54, 0x4e, 0x1c, 0xb5, 0x74, 0xb4, - 0x28, 0xfe, 0x71, 0x0a, 0x85, 0x63, 0x56, 0x19, 0x59, 0xd9, 0xa7, 0x1f, 0x40, 0x56, 0xf6, 0xf5, - 0x38, 0x2b, 0xfb, 0xd9, 0xfc, 0xa1, 0xce, 0x78, 0x56, 0x93, 0x93, 0x8b, 0xfd, 0x96, 0x9e, 0x43, - 0xfd, 0xe1, 0x0e, 0x96, 0xb0, 0x2c, 0x85, 0x72, 0x87, 0xcc, 0xe9, 0xaf, 0xf2, 0xcc, 0xe9, 0x8f, - 0xe4, 0xef, 0xe4, 0xc9, 0xe3, 0xce, 0xc8, 0x97, 0x4e, 0xdb, 0xa5, 0x02, 0xa9, 0xb2, 0xc0, 0xe7, - 0x39, 0xed, 0x52, 0x91, 0x58, 0xd3, 0xed, 0x52, 0x28, 0x1c, 0xb3, 0xb2, 0x7f, 0xa0, 0x00, 0xe7, - 0x3b, 0xaf, 0xb7, 0x58, 0x4b, 0x5e, 0x89, 0x1d, 0x03, 0x12, 0x5a, 0x72, 0x7e, 0x67, 0x8b, 0xa9, - 0x7a, 0x8e, 0x0b, 0x79, 0x05, 0x26, 0xd5, 0x7b, 0x9c, 0x86, 0x5b, 0xdb, 0x5f, 0x8f, 0xaf, 0xc9, - 0x2a, 0x82, 0x42, 0x35, 0x49, 0x80, 0xd3, 0x65, 0xd0, 0x02, 0x8c, 0x1b, 0xc0, 0xf2, 0xb2, 0xb8, - 0x9b, 0xc5, 0xa1, 0xb6, 0x4d, 0x34, 0x4e, 0xd2, 0xdb, 0x5f, 0xb2, 0xe0, 0xa1, 0x9c, 0xb4, 0xa3, - 0x3d, 0x87, 0x3d, 0xdc, 0x82, 0xf1, 0x96, 0x59, 0xb4, 0x4b, 0xa4, 0x56, 0x23, 0xb9, 0xa9, 0x6a, - 0x6b, 0x02, 0x81, 0x93, 0x4c, 0xed, 0x9f, 0x2e, 0xc0, 0xb9, 0x8e, 0x0e, 0xa6, 0x08, 0xc3, 0x99, - 0xed, 0x66, 0xe8, 0x2c, 0x05, 0xa4, 0x4e, 0xbc, 0xc8, 0x75, 0x1a, 0xd5, 0x16, 0xa9, 0x69, 0x76, - 0x0e, 0xe6, 0xa9, 0x79, 0x65, 0xad, 0xba, 0x90, 0xa6, 0xc0, 0x39, 0x25, 0xd1, 0x2a, 0xa0, 0x34, - 0x46, 0x8c, 0x30, 0x0b, 0xa1, 0x9f, 0xe6, 0x87, 0x33, 0x4a, 0xa0, 0x0f, 0xc0, 0xa8, 0x72, 0x5c, - 0xd5, 0x46, 0x9c, 0x6d, 0xec, 0x58, 0x47, 0x60, 0x93, 0x0e, 0x5d, 0xe6, 0x39, 0x18, 0x44, 0xb6, - 0x0e, 0x61, 0x14, 0x19, 0x97, 0x09, 0x16, 0x04, 0x18, 0xeb, 0x34, 0x8b, 0x2f, 0xfd, 0xda, 0xef, - 0x9d, 0x7f, 0xcf, 0x6f, 0xfc, 0xde, 0xf9, 0xf7, 0xfc, 0xf6, 0xef, 0x9d, 0x7f, 0xcf, 0x77, 0xdd, - 0x3b, 0x6f, 0xfd, 0xda, 0xbd, 0xf3, 0xd6, 0x6f, 0xdc, 0x3b, 0x6f, 0xfd, 0xf6, 0xbd, 0xf3, 0xd6, - 0xef, 0xde, 0x3b, 0x6f, 0x7d, 0xe1, 0xf7, 0xcf, 0xbf, 0xe7, 0x63, 0x28, 0x0e, 0x24, 0x3a, 0x4f, - 0x47, 0x67, 0x7e, 0xef, 0xf2, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x52, 0x56, 0xa0, 0x3b, 0xf7, - 0x0c, 0x01, 0x00, + // 14685 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x5c, 0xd7, + 0x75, 0x18, 0xac, 0xd7, 0x3d, 0x5b, 0x9f, 0xd9, 0xef, 0x00, 0xe0, 0x60, 0x48, 0xa0, 0xc1, 0x47, + 0x12, 0x04, 0x45, 0x72, 0x20, 0x70, 0x91, 0x28, 0x52, 0xa2, 0x35, 0x2b, 0x30, 0x04, 0x66, 0xd0, + 0xbc, 0x3d, 0x00, 0x24, 0x8a, 0x52, 0xe9, 0x4d, 0xf7, 0x9d, 0x99, 0xa7, 0xe9, 0x7e, 0xaf, 0xf9, + 0xde, 0xeb, 0x01, 0x06, 0x9f, 0x54, 0x9f, 0x2d, 0xc7, 0x8b, 0x6c, 0x27, 0xa5, 0x4a, 0x39, 0x4b, + 0xc9, 0x2e, 0x57, 0xca, 0x76, 0x6c, 0x2b, 0xca, 0xa6, 0xc8, 0xb1, 0x1d, 0xcb, 0x5b, 0xb6, 0x8a, + 0x93, 0x4a, 0x39, 0x8e, 0xab, 0x62, 
0xb9, 0xe2, 0xca, 0xc4, 0x82, 0x53, 0xe5, 0x52, 0x55, 0x62, + 0x3b, 0xcb, 0x8f, 0x64, 0xe2, 0xc4, 0xa9, 0xbb, 0xbe, 0x7b, 0xdf, 0xd2, 0xdd, 0x03, 0x0e, 0x46, + 0x94, 0x8a, 0xff, 0xba, 0xcf, 0x39, 0xf7, 0xdc, 0xfb, 0xee, 0x7a, 0xee, 0x39, 0xe7, 0x9e, 0x03, + 0xaf, 0xec, 0xbc, 0x14, 0xce, 0xba, 0xfe, 0xc5, 0x9d, 0xf6, 0x06, 0x09, 0x3c, 0x12, 0x91, 0xf0, + 0xe2, 0x2e, 0xf1, 0xea, 0x7e, 0x70, 0x51, 0x20, 0x9c, 0x96, 0x7b, 0xb1, 0xe6, 0x07, 0xe4, 0xe2, + 0xee, 0xa5, 0x8b, 0x5b, 0xc4, 0x23, 0x81, 0x13, 0x91, 0xfa, 0x6c, 0x2b, 0xf0, 0x23, 0x1f, 0x21, + 0x4e, 0x33, 0xeb, 0xb4, 0xdc, 0x59, 0x4a, 0x33, 0xbb, 0x7b, 0x69, 0xe6, 0xd9, 0x2d, 0x37, 0xda, + 0x6e, 0x6f, 0xcc, 0xd6, 0xfc, 0xe6, 0xc5, 0x2d, 0x7f, 0xcb, 0xbf, 0xc8, 0x48, 0x37, 0xda, 0x9b, + 0xec, 0x1f, 0xfb, 0xc3, 0x7e, 0x71, 0x16, 0x33, 0x2f, 0xc4, 0xd5, 0x34, 0x9d, 0xda, 0xb6, 0xeb, + 0x91, 0x60, 0xef, 0x62, 0x6b, 0x67, 0x8b, 0xd5, 0x1b, 0x90, 0xd0, 0x6f, 0x07, 0x35, 0x92, 0xac, + 0xb8, 0x63, 0xa9, 0xf0, 0x62, 0x93, 0x44, 0x4e, 0x46, 0x73, 0x67, 0x2e, 0xe6, 0x95, 0x0a, 0xda, + 0x5e, 0xe4, 0x36, 0xd3, 0xd5, 0xbc, 0xbf, 0x5b, 0x81, 0xb0, 0xb6, 0x4d, 0x9a, 0x4e, 0xaa, 0xdc, + 0xf3, 0x79, 0xe5, 0xda, 0x91, 0xdb, 0xb8, 0xe8, 0x7a, 0x51, 0x18, 0x05, 0xc9, 0x42, 0xf6, 0xd7, + 0x2d, 0x38, 0x37, 0x77, 0xab, 0xba, 0xd4, 0x70, 0xc2, 0xc8, 0xad, 0xcd, 0x37, 0xfc, 0xda, 0x4e, + 0x35, 0xf2, 0x03, 0x72, 0xd3, 0x6f, 0xb4, 0x9b, 0xa4, 0xca, 0x3a, 0x02, 0x3d, 0x03, 0x43, 0xbb, + 0xec, 0xff, 0xca, 0xe2, 0xb4, 0x75, 0xce, 0xba, 0x50, 0x9a, 0x9f, 0xf8, 0xcd, 0xfd, 0xf2, 0x7b, + 0xee, 0xed, 0x97, 0x87, 0x6e, 0x0a, 0x38, 0x56, 0x14, 0xe8, 0x3c, 0x0c, 0x6c, 0x86, 0xeb, 0x7b, + 0x2d, 0x32, 0x5d, 0x60, 0xb4, 0x63, 0x82, 0x76, 0x60, 0xb9, 0x4a, 0xa1, 0x58, 0x60, 0xd1, 0x45, + 0x28, 0xb5, 0x9c, 0x20, 0x72, 0x23, 0xd7, 0xf7, 0xa6, 0x8b, 0xe7, 0xac, 0x0b, 0xfd, 0xf3, 0x93, + 0x82, 0xb4, 0x54, 0x91, 0x08, 0x1c, 0xd3, 0xd0, 0x66, 0x04, 0xc4, 0xa9, 0x5f, 0xf7, 0x1a, 0x7b, + 0xd3, 0x7d, 0xe7, 0xac, 0x0b, 0x43, 0x71, 0x33, 0xb0, 0x80, 0x63, 0x45, 0x61, 0x7f, 0xb1, 0x00, + 0x43, 0x73, 0x9b, 0x9b, 0xae, 0xe7, 0x46, 0x7b, 0xe8, 0x26, 0x8c, 0x78, 0x7e, 0x9d, 0xc8, 0xff, + 0xec, 0x2b, 0x86, 0x9f, 0x3b, 0x37, 0x9b, 0x9e, 0x4a, 0xb3, 0x6b, 0x1a, 0xdd, 0xfc, 0xc4, 0xbd, + 0xfd, 0xf2, 0x88, 0x0e, 0xc1, 0x06, 0x1f, 0x84, 0x61, 0xb8, 0xe5, 0xd7, 0x15, 0xdb, 0x02, 0x63, + 0x5b, 0xce, 0x62, 0x5b, 0x89, 0xc9, 0xe6, 0xc7, 0xef, 0xed, 0x97, 0x87, 0x35, 0x00, 0xd6, 0x99, + 0xa0, 0x0d, 0x18, 0xa7, 0x7f, 0xbd, 0xc8, 0x55, 0x7c, 0x8b, 0x8c, 0xef, 0x63, 0x79, 0x7c, 0x35, + 0xd2, 0xf9, 0xa9, 0x7b, 0xfb, 0xe5, 0xf1, 0x04, 0x10, 0x27, 0x19, 0xda, 0x77, 0x61, 0x6c, 0x2e, + 0x8a, 0x9c, 0xda, 0x36, 0xa9, 0xf3, 0x11, 0x44, 0x2f, 0x40, 0x9f, 0xe7, 0x34, 0x89, 0x18, 0xdf, + 0x73, 0xa2, 0x63, 0xfb, 0xd6, 0x9c, 0x26, 0x39, 0xd8, 0x2f, 0x4f, 0xdc, 0xf0, 0xdc, 0xb7, 0xda, + 0x62, 0x56, 0x50, 0x18, 0x66, 0xd4, 0xe8, 0x39, 0x80, 0x3a, 0xd9, 0x75, 0x6b, 0xa4, 0xe2, 0x44, + 0xdb, 0x62, 0xbc, 0x91, 0x28, 0x0b, 0x8b, 0x0a, 0x83, 0x35, 0x2a, 0xfb, 0x0e, 0x94, 0xe6, 0x76, + 0x7d, 0xb7, 0x5e, 0xf1, 0xeb, 0x21, 0xda, 0x81, 0xf1, 0x56, 0x40, 0x36, 0x49, 0xa0, 0x40, 0xd3, + 0xd6, 0xb9, 0xe2, 0x85, 0xe1, 0xe7, 0x2e, 0x64, 0x7e, 0xac, 0x49, 0xba, 0xe4, 0x45, 0xc1, 0xde, + 0xfc, 0x43, 0xa2, 0xbe, 0xf1, 0x04, 0x16, 0x27, 0x39, 0xdb, 0xff, 0xac, 0x00, 0x27, 0xe7, 0xee, + 0xb6, 0x03, 0xb2, 0xe8, 0x86, 0x3b, 0xc9, 0x19, 0x5e, 0x77, 0xc3, 0x9d, 0xb5, 0xb8, 0x07, 0xd4, + 0xd4, 0x5a, 0x14, 0x70, 0xac, 0x28, 0xd0, 0xb3, 0x30, 0x48, 0x7f, 0xdf, 0xc0, 0x2b, 0xe2, 0x93, + 0xa7, 0x04, 0xf1, 0xf0, 0xa2, 0x13, 0x39, 0x8b, 0x1c, 0x85, 
0x25, 0x0d, 0x5a, 0x85, 0xe1, 0x1a, + 0x5b, 0x90, 0x5b, 0xab, 0x7e, 0x9d, 0xb0, 0xc1, 0x2c, 0xcd, 0x3f, 0x4d, 0xc9, 0x17, 0x62, 0xf0, + 0xc1, 0x7e, 0x79, 0x9a, 0xb7, 0x4d, 0xb0, 0xd0, 0x70, 0x58, 0x2f, 0x8f, 0x6c, 0xb5, 0xbe, 0xfa, + 0x18, 0x27, 0xc8, 0x58, 0x5b, 0x17, 0xb4, 0xa5, 0xd2, 0xcf, 0x96, 0xca, 0x48, 0xf6, 0x32, 0x41, + 0x97, 0xa0, 0x6f, 0xc7, 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd7, 0x19, 0x3a, 0xe6, 0x57, 0x5d, 0xaf, + 0x7e, 0xb0, 0x5f, 0x9e, 0x34, 0x9a, 0x43, 0x81, 0x98, 0x91, 0xda, 0xff, 0xdd, 0x82, 0x32, 0xc3, + 0x2d, 0xbb, 0x0d, 0x52, 0x21, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0xa3, 0x43, 0x9f, 0x03, 0x08, + 0x49, 0x2d, 0x20, 0x91, 0xd6, 0xa5, 0x6a, 0x62, 0x54, 0x15, 0x06, 0x6b, 0x54, 0x74, 0x43, 0x08, + 0xb7, 0x9d, 0x80, 0xcd, 0x2f, 0xd1, 0xb1, 0x6a, 0x43, 0xa8, 0x4a, 0x04, 0x8e, 0x69, 0x8c, 0x0d, + 0xa1, 0xd8, 0x6d, 0x43, 0x40, 0x1f, 0x86, 0xf1, 0xb8, 0xb2, 0xb0, 0xe5, 0xd4, 0x64, 0x07, 0xb2, + 0x25, 0x53, 0x35, 0x51, 0x38, 0x49, 0x6b, 0xff, 0x2d, 0x4b, 0x4c, 0x1e, 0xfa, 0xd5, 0xef, 0xf0, + 0x6f, 0xb5, 0x7f, 0xc9, 0x82, 0xc1, 0x79, 0xd7, 0xab, 0xbb, 0xde, 0x16, 0xfa, 0x14, 0x0c, 0xd1, + 0xb3, 0xa9, 0xee, 0x44, 0x8e, 0xd8, 0xf7, 0xde, 0xa7, 0xad, 0x2d, 0x75, 0x54, 0xcc, 0xb6, 0x76, + 0xb6, 0x28, 0x20, 0x9c, 0xa5, 0xd4, 0x74, 0xb5, 0x5d, 0xdf, 0xf8, 0x34, 0xa9, 0x45, 0xab, 0x24, + 0x72, 0xe2, 0xcf, 0x89, 0x61, 0x58, 0x71, 0x45, 0x57, 0x61, 0x20, 0x72, 0x82, 0x2d, 0x12, 0x89, + 0x0d, 0x30, 0x73, 0xa3, 0xe2, 0x25, 0x31, 0x5d, 0x91, 0xc4, 0xab, 0x91, 0xf8, 0x58, 0x58, 0x67, + 0x45, 0xb1, 0x60, 0x61, 0xff, 0x9f, 0x41, 0x38, 0xbd, 0x50, 0x5d, 0xc9, 0x99, 0x57, 0xe7, 0x61, + 0xa0, 0x1e, 0xb8, 0xbb, 0x24, 0x10, 0xfd, 0xac, 0xb8, 0x2c, 0x32, 0x28, 0x16, 0x58, 0xf4, 0x12, + 0x8c, 0xf0, 0x03, 0xe9, 0x8a, 0xe3, 0xd5, 0x1b, 0xb2, 0x8b, 0x4f, 0x08, 0xea, 0x91, 0x9b, 0x1a, + 0x0e, 0x1b, 0x94, 0x87, 0x9c, 0x54, 0xe7, 0x13, 0x8b, 0x31, 0xef, 0xb0, 0xfb, 0xbc, 0x05, 0x13, + 0xbc, 0x9a, 0xb9, 0x28, 0x0a, 0xdc, 0x8d, 0x76, 0x44, 0xc2, 0xe9, 0x7e, 0xb6, 0xd3, 0x2d, 0x64, + 0xf5, 0x56, 0x6e, 0x0f, 0xcc, 0xde, 0x4c, 0x70, 0xe1, 0x9b, 0xe0, 0xb4, 0xa8, 0x77, 0x22, 0x89, + 0xc6, 0xa9, 0x6a, 0xd1, 0xf7, 0x5a, 0x30, 0x53, 0xf3, 0xbd, 0x28, 0xf0, 0x1b, 0x0d, 0x12, 0x54, + 0xda, 0x1b, 0x0d, 0x37, 0xdc, 0xe6, 0xf3, 0x14, 0x93, 0x4d, 0xb6, 0x13, 0xe4, 0x8c, 0xa1, 0x22, + 0x12, 0x63, 0x78, 0xf6, 0xde, 0x7e, 0x79, 0x66, 0x21, 0x97, 0x15, 0xee, 0x50, 0x0d, 0xda, 0x01, + 0x44, 0x8f, 0xd2, 0x6a, 0xe4, 0x6c, 0x91, 0xb8, 0xf2, 0xc1, 0xde, 0x2b, 0x3f, 0x75, 0x6f, 0xbf, + 0x8c, 0xd6, 0x52, 0x2c, 0x70, 0x06, 0x5b, 0xf4, 0x16, 0x9c, 0xa0, 0xd0, 0xd4, 0xb7, 0x0e, 0xf5, + 0x5e, 0xdd, 0xf4, 0xbd, 0xfd, 0xf2, 0x89, 0xb5, 0x0c, 0x26, 0x38, 0x93, 0x35, 0xfa, 0x6e, 0x0b, + 0x4e, 0xc7, 0x9f, 0xbf, 0x74, 0xa7, 0xe5, 0x78, 0xf5, 0xb8, 0xe2, 0x52, 0xef, 0x15, 0xd3, 0x3d, + 0xf9, 0xf4, 0x42, 0x1e, 0x27, 0x9c, 0x5f, 0x09, 0xf2, 0x60, 0x8a, 0x36, 0x2d, 0x59, 0x37, 0xf4, + 0x5e, 0xf7, 0x43, 0xf7, 0xf6, 0xcb, 0x53, 0x6b, 0x69, 0x1e, 0x38, 0x8b, 0xf1, 0xcc, 0x02, 0x9c, + 0xcc, 0x9c, 0x9d, 0x68, 0x02, 0x8a, 0x3b, 0x84, 0x4b, 0x5d, 0x25, 0x4c, 0x7f, 0xa2, 0x13, 0xd0, + 0xbf, 0xeb, 0x34, 0xda, 0x62, 0x61, 0x62, 0xfe, 0xe7, 0xe5, 0xc2, 0x4b, 0x96, 0xfd, 0xcf, 0x8b, + 0x30, 0xbe, 0x50, 0x5d, 0xb9, 0xaf, 0x55, 0xaf, 0x1f, 0x7b, 0x85, 0x8e, 0xc7, 0x5e, 0x7c, 0x88, + 0x16, 0x73, 0x0f, 0xd1, 0xff, 0x3f, 0x63, 0xc9, 0xf6, 0xb1, 0x25, 0xfb, 0xc1, 0x9c, 0x25, 0x7b, + 0xc4, 0x0b, 0x75, 0x37, 0x67, 0xd6, 0xf6, 0xb3, 0x01, 0xcc, 0x94, 0x90, 0xae, 0xf9, 0x35, 0xa7, + 0x91, 0xdc, 0x6a, 0x0f, 0x39, 0x75, 0x8f, 0x66, 0x1c, 0x6b, 0x30, 0xb2, 0xe0, 0xb4, 
0x9c, 0x0d, + 0xb7, 0xe1, 0x46, 0x2e, 0x09, 0xd1, 0x93, 0x50, 0x74, 0xea, 0x75, 0x26, 0xdd, 0x95, 0xe6, 0x4f, + 0xde, 0xdb, 0x2f, 0x17, 0xe7, 0xea, 0x54, 0xcc, 0x00, 0x45, 0xb5, 0x87, 0x29, 0x05, 0x7a, 0x2f, + 0xf4, 0xd5, 0x03, 0xbf, 0x35, 0x5d, 0x60, 0x94, 0x74, 0x95, 0xf7, 0x2d, 0x06, 0x7e, 0x2b, 0x41, + 0xca, 0x68, 0xec, 0xdf, 0x28, 0xc0, 0x23, 0x0b, 0xa4, 0xb5, 0xbd, 0x5c, 0xcd, 0x39, 0x2f, 0x2e, + 0xc0, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0xab, 0x02, 0x86, 0x15, + 0x16, 0x9d, 0x83, 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, 0x43, 0x29, + 0xda, 0x21, 0x09, 0xc4, 0x8c, 0x51, 0x14, 0x37, 0x42, 0x12, 0x60, 0x86, 0x89, 0x25, 0x01, 0x2a, + 0x23, 0x88, 0x13, 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, 0x62, 0x64, + 0x7b, 0x5a, 0x9a, 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, 0xae, 0xa2, + 0xc2, 0xd7, 0x0a, 0x80, 0x78, 0x17, 0x7e, 0x9b, 0x75, 0xdc, 0x8d, 0x74, 0xc7, 0xf5, 0xbe, 0x24, + 0x8e, 0xaa, 0xf7, 0xfe, 0x87, 0x05, 0x8f, 0x2c, 0xb8, 0x5e, 0x9d, 0x04, 0x39, 0x13, 0xf0, 0xc1, + 0xdc, 0x9d, 0x0f, 0x27, 0xa4, 0x18, 0x53, 0xac, 0xef, 0x08, 0xa6, 0x98, 0xfd, 0x27, 0x16, 0x20, + 0xfe, 0xd9, 0xef, 0xb8, 0x8f, 0xbd, 0x91, 0xfe, 0xd8, 0x23, 0x98, 0x16, 0xf6, 0xdf, 0xb3, 0x60, + 0x78, 0xa1, 0xe1, 0xb8, 0x4d, 0xf1, 0xa9, 0x0b, 0x30, 0x29, 0x15, 0x45, 0x0c, 0xac, 0xc9, 0xfe, + 0x74, 0x73, 0x9b, 0xc4, 0x49, 0x24, 0x4e, 0xd3, 0xa3, 0x8f, 0xc3, 0x69, 0x03, 0xb8, 0x4e, 0x9a, + 0xad, 0x86, 0x13, 0xe9, 0xb7, 0x02, 0x76, 0xfa, 0xe3, 0x3c, 0x22, 0x9c, 0x5f, 0xde, 0xbe, 0x06, + 0x63, 0x0b, 0x0d, 0x97, 0x78, 0xd1, 0x4a, 0x65, 0xc1, 0xf7, 0x36, 0xdd, 0x2d, 0xf4, 0x32, 0x8c, + 0x45, 0x6e, 0x93, 0xf8, 0xed, 0xa8, 0x4a, 0x6a, 0xbe, 0xc7, 0xee, 0xda, 0xd6, 0x85, 0xfe, 0x79, + 0x74, 0x6f, 0xbf, 0x3c, 0xb6, 0x6e, 0x60, 0x70, 0x82, 0xd2, 0xfe, 0x7d, 0x3a, 0xe2, 0x7e, 0xb3, + 0xe5, 0x7b, 0xc4, 0x8b, 0x16, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0x2f, 0x43, 0x5f, 0x44, 0x47, 0x90, + 0x7f, 0xf9, 0x79, 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0xec, 0x97, 0x4f, 0xa5, 0x4b, 0xb0, 0x91, 0x65, + 0x65, 0xd0, 0x07, 0x61, 0x20, 0x8c, 0x9c, 0xa8, 0x1d, 0x8a, 0x4f, 0x7d, 0x54, 0x8e, 0x7f, 0x95, + 0x41, 0x0f, 0xf6, 0xcb, 0xe3, 0xaa, 0x18, 0x07, 0x61, 0x51, 0x00, 0x3d, 0x05, 0x83, 0x4d, 0x12, + 0x86, 0xce, 0x96, 0x3c, 0xbf, 0xc7, 0x45, 0xd9, 0xc1, 0x55, 0x0e, 0xc6, 0x12, 0x8f, 0x1e, 0x83, + 0x7e, 0x12, 0x04, 0x7e, 0x20, 0x76, 0x95, 0x51, 0x41, 0xd8, 0xbf, 0x44, 0x81, 0x98, 0xe3, 0xec, + 0x7f, 0x63, 0xc1, 0xb8, 0x6a, 0x2b, 0xaf, 0xeb, 0x18, 0xee, 0x4d, 0x6f, 0x00, 0xd4, 0xe4, 0x07, + 0x86, 0xec, 0xbc, 0x1b, 0x7e, 0xee, 0x7c, 0xa6, 0x68, 0x91, 0xea, 0xc6, 0x98, 0xb3, 0x02, 0x85, + 0x58, 0xe3, 0x66, 0xff, 0xaa, 0x05, 0x53, 0x89, 0x2f, 0xba, 0xe6, 0x86, 0x11, 0x7a, 0x33, 0xf5, + 0x55, 0xb3, 0xbd, 0x7d, 0x15, 0x2d, 0xcd, 0xbe, 0x49, 0x2d, 0x3e, 0x09, 0xd1, 0xbe, 0xe8, 0x0a, + 0xf4, 0xbb, 0x11, 0x69, 0xca, 0x8f, 0x79, 0xac, 0xe3, 0xc7, 0xf0, 0x56, 0xc5, 0x23, 0xb2, 0x42, + 0x4b, 0x62, 0xce, 0xc0, 0xfe, 0x8d, 0x22, 0x94, 0xf8, 0xb4, 0x5d, 0x75, 0x5a, 0xc7, 0x30, 0x16, + 0x4f, 0x43, 0xc9, 0x6d, 0x36, 0xdb, 0x91, 0xb3, 0x21, 0x0e, 0xa0, 0x21, 0xbe, 0x19, 0xac, 0x48, + 0x20, 0x8e, 0xf1, 0x68, 0x05, 0xfa, 0x58, 0x53, 0xf8, 0x57, 0x3e, 0x99, 0xfd, 0x95, 0xa2, 0xed, + 0xb3, 0x8b, 0x4e, 0xe4, 0x70, 0xd9, 0x4f, 0x9d, 0x7c, 0x14, 0x84, 0x19, 0x0b, 0xe4, 0x00, 0x6c, + 0xb8, 0x9e, 0x13, 0xec, 0x51, 0xd8, 0x74, 0x91, 0x31, 0x7c, 0xb6, 0x33, 0xc3, 0x79, 0x45, 0xcf, + 0xd9, 0xaa, 0x0f, 0x8b, 0x11, 0x58, 0x63, 0x3a, 0xf3, 0x01, 0x28, 0x29, 0xe2, 0xc3, 0x88, 0x70, + 0x33, 0x1f, 
0x86, 0xf1, 0x44, 0x5d, 0xdd, 0x8a, 0x8f, 0xe8, 0x12, 0xe0, 0x2f, 0xb3, 0x2d, 0x43, + 0xb4, 0x7a, 0xc9, 0xdb, 0x15, 0x3b, 0xe7, 0x5d, 0x38, 0xd1, 0xc8, 0xd8, 0x7b, 0xc5, 0xb8, 0xf6, + 0xbe, 0x57, 0x3f, 0x22, 0x3e, 0xfb, 0x44, 0x16, 0x16, 0x67, 0xd6, 0x41, 0xa5, 0x1a, 0xbf, 0x45, + 0x17, 0x88, 0xd3, 0xd0, 0x2f, 0x08, 0xd7, 0x05, 0x0c, 0x2b, 0x2c, 0xdd, 0xef, 0x4e, 0xa8, 0xc6, + 0x5f, 0x25, 0x7b, 0x55, 0xd2, 0x20, 0xb5, 0xc8, 0x0f, 0xbe, 0xa5, 0xcd, 0x3f, 0xc3, 0x7b, 0x9f, + 0x6f, 0x97, 0xc3, 0x82, 0x41, 0xf1, 0x2a, 0xd9, 0xe3, 0x43, 0xa1, 0x7f, 0x5d, 0xb1, 0xe3, 0xd7, + 0x7d, 0xc5, 0x82, 0x51, 0xf5, 0x75, 0xc7, 0xb0, 0x2f, 0xcc, 0x9b, 0xfb, 0xc2, 0x99, 0x8e, 0x13, + 0x3c, 0x67, 0x47, 0xf8, 0x5a, 0x01, 0x4e, 0x2b, 0x1a, 0x7a, 0x9b, 0xe1, 0x7f, 0xc4, 0xac, 0xba, + 0x08, 0x25, 0x4f, 0xe9, 0xf5, 0x2c, 0x53, 0xa1, 0x16, 0x6b, 0xf5, 0x62, 0x1a, 0x2a, 0x94, 0x7a, + 0xf1, 0x31, 0x3b, 0xa2, 0x2b, 0xbc, 0x85, 0x72, 0x7b, 0x1e, 0x8a, 0x6d, 0xb7, 0x2e, 0x0e, 0x98, + 0xf7, 0xc9, 0xde, 0xbe, 0xb1, 0xb2, 0x78, 0xb0, 0x5f, 0x7e, 0x34, 0xcf, 0xd8, 0x42, 0x4f, 0xb6, + 0x70, 0xf6, 0xc6, 0xca, 0x22, 0xa6, 0x85, 0xd1, 0x1c, 0x8c, 0xcb, 0x13, 0xfa, 0x26, 0x15, 0x10, + 0x7d, 0x4f, 0x9c, 0x43, 0x4a, 0x6b, 0x8d, 0x4d, 0x34, 0x4e, 0xd2, 0xa3, 0x45, 0x98, 0xd8, 0x69, + 0x6f, 0x90, 0x06, 0x89, 0xf8, 0x07, 0x5f, 0x25, 0x5c, 0xa7, 0x5b, 0x8a, 0xef, 0x92, 0x57, 0x13, + 0x78, 0x9c, 0x2a, 0x61, 0xff, 0x39, 0x3b, 0x0f, 0x44, 0xef, 0x55, 0x02, 0x9f, 0x4e, 0x2c, 0xca, + 0xfd, 0x5b, 0x39, 0x9d, 0x7b, 0x99, 0x15, 0x57, 0xc9, 0xde, 0xba, 0x4f, 0xef, 0x12, 0xd9, 0xb3, + 0xc2, 0x98, 0xf3, 0x7d, 0x1d, 0xe7, 0xfc, 0xcf, 0x17, 0xe0, 0xa4, 0xea, 0x01, 0x43, 0x6c, 0xfd, + 0x76, 0xef, 0x83, 0x4b, 0x30, 0x5c, 0x27, 0x9b, 0x4e, 0xbb, 0x11, 0x29, 0x03, 0x43, 0x3f, 0x37, + 0x32, 0x2d, 0xc6, 0x60, 0xac, 0xd3, 0x1c, 0xa2, 0xdb, 0xfe, 0xfd, 0x08, 0x3b, 0x88, 0x23, 0x87, + 0xce, 0x71, 0xb5, 0x6a, 0xac, 0xdc, 0x55, 0xf3, 0x18, 0xf4, 0xbb, 0x4d, 0x2a, 0x98, 0x15, 0x4c, + 0x79, 0x6b, 0x85, 0x02, 0x31, 0xc7, 0xa1, 0x27, 0x60, 0xb0, 0xe6, 0x37, 0x9b, 0x8e, 0x57, 0x67, + 0x47, 0x5e, 0x69, 0x7e, 0x98, 0xca, 0x6e, 0x0b, 0x1c, 0x84, 0x25, 0x0e, 0x3d, 0x02, 0x7d, 0x4e, + 0xb0, 0xc5, 0xb5, 0x2e, 0xa5, 0xf9, 0x21, 0x5a, 0xd3, 0x5c, 0xb0, 0x15, 0x62, 0x06, 0xa5, 0x97, + 0xc6, 0xdb, 0x7e, 0xb0, 0xe3, 0x7a, 0x5b, 0x8b, 0x6e, 0x20, 0x96, 0x84, 0x3a, 0x0b, 0x6f, 0x29, + 0x0c, 0xd6, 0xa8, 0xd0, 0x32, 0xf4, 0xb7, 0xfc, 0x20, 0x0a, 0xa7, 0x07, 0x58, 0x77, 0x3f, 0x9a, + 0xb3, 0x11, 0xf1, 0xaf, 0xad, 0xf8, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1c, 0x5d, + 0x83, 0x41, 0xe2, 0xed, 0x2e, 0x07, 0x7e, 0x73, 0x7a, 0x2a, 0x9f, 0xd3, 0x12, 0x27, 0xe1, 0xd3, + 0x2c, 0x96, 0x51, 0x05, 0x18, 0x4b, 0x16, 0xe8, 0x83, 0x50, 0x24, 0xde, 0xee, 0xf4, 0x20, 0xe3, + 0x34, 0x93, 0xc3, 0xe9, 0xa6, 0x13, 0xc4, 0x7b, 0xfe, 0x92, 0xb7, 0x8b, 0x69, 0x19, 0xf4, 0x31, + 0x28, 0xc9, 0x0d, 0x23, 0x14, 0xea, 0xcc, 0xcc, 0x09, 0x2b, 0xb7, 0x19, 0x4c, 0xde, 0x6a, 0xbb, + 0x01, 0x69, 0x12, 0x2f, 0x0a, 0xe3, 0x1d, 0x52, 0x62, 0x43, 0x1c, 0x73, 0x43, 0x35, 0x18, 0x09, + 0x48, 0xe8, 0xde, 0x25, 0x15, 0xbf, 0xe1, 0xd6, 0xf6, 0xa6, 0x1f, 0x62, 0xcd, 0x7b, 0xaa, 0x63, + 0x97, 0x61, 0xad, 0x40, 0xac, 0x6e, 0xd7, 0xa1, 0xd8, 0x60, 0x8a, 0x3e, 0x26, 0x15, 0xf5, 0xab, + 0x7e, 0xdb, 0x8b, 0xc2, 0xe9, 0x12, 0xab, 0x24, 0xd3, 0x84, 0x7a, 0x33, 0xa6, 0x4b, 0x6a, 0xf2, + 0x79, 0x61, 0x6c, 0xb0, 0x42, 0x9f, 0x80, 0x51, 0xfe, 0x9f, 0x1b, 0x22, 0xc3, 0xe9, 0x93, 0x8c, + 0xf7, 0xb9, 0x7c, 0xde, 0x9c, 0x70, 0xfe, 0xa4, 0x60, 0x3e, 0xaa, 0x43, 0x43, 0x6c, 0x72, 0x43, + 0x18, 0x46, 0x1b, 0xee, 0x2e, 0xf1, 
0x48, 0x18, 0x56, 0x02, 0x7f, 0x83, 0x08, 0xbd, 0xea, 0xe9, + 0x6c, 0xc3, 0xa5, 0xbf, 0x41, 0xe6, 0x27, 0x29, 0xcf, 0x6b, 0x7a, 0x19, 0x6c, 0xb2, 0x40, 0x37, + 0x60, 0x8c, 0x5e, 0x64, 0xdd, 0x98, 0xe9, 0x70, 0x37, 0xa6, 0xec, 0xf2, 0x86, 0x8d, 0x42, 0x38, + 0xc1, 0x04, 0x5d, 0x87, 0x91, 0x30, 0x72, 0x82, 0xa8, 0xdd, 0xe2, 0x4c, 0x4f, 0x75, 0x63, 0xca, + 0xec, 0xde, 0x55, 0xad, 0x08, 0x36, 0x18, 0xa0, 0xd7, 0xa0, 0xd4, 0x70, 0x37, 0x49, 0x6d, 0xaf, + 0xd6, 0x20, 0xd3, 0x23, 0x8c, 0x5b, 0xe6, 0xce, 0x75, 0x4d, 0x12, 0x71, 0x61, 0x5a, 0xfd, 0xc5, + 0x71, 0x71, 0x74, 0x13, 0x4e, 0x45, 0x24, 0x68, 0xba, 0x9e, 0x43, 0x77, 0x1c, 0x71, 0x7f, 0x63, + 0xf6, 0xe4, 0x51, 0xb6, 0xa4, 0xcf, 0x8a, 0xd1, 0x38, 0xb5, 0x9e, 0x49, 0x85, 0x73, 0x4a, 0xa3, + 0x3b, 0x30, 0x9d, 0x81, 0xe1, 0x53, 0xf9, 0x04, 0xe3, 0xfc, 0x21, 0xc1, 0x79, 0x7a, 0x3d, 0x87, + 0xee, 0xa0, 0x03, 0x0e, 0xe7, 0x72, 0x47, 0xd7, 0x61, 0x9c, 0x6d, 0x73, 0x95, 0x76, 0xa3, 0x21, + 0x2a, 0x1c, 0x63, 0x15, 0x3e, 0x21, 0x0f, 0xfd, 0x15, 0x13, 0x7d, 0xb0, 0x5f, 0x86, 0xf8, 0x1f, + 0x4e, 0x96, 0x46, 0x1b, 0xcc, 0x74, 0xd9, 0x0e, 0xdc, 0x68, 0x8f, 0xae, 0x34, 0x72, 0x27, 0x9a, + 0x1e, 0xef, 0xa8, 0xc6, 0xd1, 0x49, 0x95, 0x7d, 0x53, 0x07, 0xe2, 0x24, 0x43, 0xba, 0x6f, 0x87, + 0x51, 0xdd, 0xf5, 0xa6, 0x27, 0xf8, 0xe5, 0x47, 0x6e, 0x7b, 0x55, 0x0a, 0xc4, 0x1c, 0xc7, 0xcc, + 0x96, 0xf4, 0xc7, 0x75, 0x7a, 0x3c, 0x4e, 0x32, 0xc2, 0xd8, 0x6c, 0x29, 0x11, 0x38, 0xa6, 0xa1, + 0x12, 0x6b, 0x14, 0xed, 0x4d, 0x23, 0x46, 0xaa, 0x76, 0xaf, 0xf5, 0xf5, 0x8f, 0x61, 0x0a, 0xb7, + 0x37, 0x60, 0x4c, 0x6d, 0x1d, 0xac, 0x4f, 0x50, 0x19, 0xfa, 0x99, 0x8c, 0x26, 0x94, 0x8e, 0x25, + 0xda, 0x04, 0x26, 0xbf, 0x61, 0x0e, 0x67, 0x4d, 0x70, 0xef, 0x92, 0xf9, 0xbd, 0x88, 0x70, 0xc5, + 0x41, 0x51, 0x6b, 0x82, 0x44, 0xe0, 0x98, 0xc6, 0xfe, 0xbf, 0x5c, 0xd6, 0x8d, 0xb7, 0xf4, 0x1e, + 0x0e, 0xb1, 0x67, 0x60, 0x68, 0xdb, 0x0f, 0x23, 0x4a, 0xcd, 0xea, 0xe8, 0x8f, 0xa5, 0xdb, 0x2b, + 0x02, 0x8e, 0x15, 0x05, 0x7a, 0x05, 0x46, 0x6b, 0x7a, 0x05, 0xe2, 0x04, 0x56, 0xdb, 0x88, 0x51, + 0x3b, 0x36, 0x69, 0xd1, 0x4b, 0x30, 0xc4, 0x5c, 0x71, 0x6a, 0x7e, 0x43, 0x88, 0x86, 0x52, 0x8c, + 0x18, 0xaa, 0x08, 0xf8, 0x81, 0xf6, 0x1b, 0x2b, 0x6a, 0x74, 0x1e, 0x06, 0x68, 0x13, 0x56, 0x2a, + 0xe2, 0xec, 0x53, 0xfa, 0xb3, 0x2b, 0x0c, 0x8a, 0x05, 0xd6, 0xfe, 0x55, 0x8b, 0x09, 0x3e, 0xe9, + 0x0d, 0x1a, 0x5d, 0x61, 0x3b, 0x3c, 0xdb, 0xee, 0x35, 0xfd, 0xd5, 0xe3, 0xda, 0xb6, 0xad, 0x70, + 0x07, 0x89, 0xff, 0xd8, 0x28, 0x89, 0xde, 0x80, 0xd1, 0x80, 0xb0, 0x2d, 0x42, 0x4c, 0x78, 0x7e, + 0xfa, 0xbf, 0x20, 0xbb, 0x00, 0xeb, 0xc8, 0x83, 0xfd, 0xf2, 0xc3, 0xf1, 0x79, 0x44, 0xdb, 0x63, + 0xa0, 0xb1, 0xc9, 0xca, 0xfe, 0xcb, 0x05, 0x6d, 0x96, 0x54, 0x23, 0x27, 0x22, 0xa8, 0x02, 0x83, + 0xb7, 0x1d, 0x37, 0x72, 0xbd, 0x2d, 0x21, 0xa4, 0x75, 0x3e, 0x95, 0x58, 0xa1, 0x5b, 0xbc, 0x00, + 0x17, 0x35, 0xc4, 0x1f, 0x2c, 0xd9, 0x50, 0x8e, 0x41, 0xdb, 0xf3, 0x28, 0xc7, 0x42, 0xaf, 0x1c, + 0x31, 0x2f, 0xc0, 0x39, 0x8a, 0x3f, 0x58, 0xb2, 0x41, 0x6f, 0x02, 0xc8, 0x1d, 0x82, 0xd4, 0x85, + 0x0b, 0xcf, 0x33, 0xdd, 0x99, 0xae, 0xab, 0x32, 0xf3, 0x63, 0x54, 0x90, 0x89, 0xff, 0x63, 0x8d, + 0x9f, 0x1d, 0x69, 0x63, 0xaa, 0x37, 0x06, 0x7d, 0x9c, 0x2e, 0x51, 0x27, 0x88, 0x48, 0x7d, 0x2e, + 0x12, 0x9d, 0xf3, 0xde, 0xde, 0x6e, 0x72, 0xeb, 0x6e, 0x93, 0xe8, 0xcb, 0x59, 0x30, 0xc1, 0x31, + 0x3f, 0xfb, 0x17, 0x8b, 0x30, 0x9d, 0xd7, 0x5c, 0xba, 0x68, 0xc8, 0x1d, 0x37, 0x5a, 0xa0, 0x32, + 0xa8, 0x65, 0x2e, 0x9a, 0x25, 0x01, 0xc7, 0x8a, 0x82, 0xce, 0xde, 0xd0, 0xdd, 0x92, 0x17, 0xf1, + 0xfe, 0x78, 0xf6, 0x56, 0x19, 0x14, 0x0b, 0x2c, 0xa5, 0x0b, 
0x88, 0x13, 0x0a, 0x1f, 0x31, 0x6d, + 0x96, 0x63, 0x06, 0xc5, 0x02, 0xab, 0xab, 0x04, 0xfb, 0xba, 0xa8, 0x04, 0x8d, 0x2e, 0xea, 0x3f, + 0xda, 0x2e, 0x42, 0x9f, 0x04, 0xd8, 0x74, 0x3d, 0x37, 0xdc, 0x66, 0xdc, 0x07, 0x0e, 0xcd, 0x5d, + 0x49, 0xb0, 0xcb, 0x8a, 0x0b, 0xd6, 0x38, 0xa2, 0x17, 0x61, 0x58, 0x6d, 0x20, 0x2b, 0x8b, 0xcc, + 0x60, 0xae, 0x39, 0x20, 0xc5, 0xbb, 0xe9, 0x22, 0xd6, 0xe9, 0xec, 0x4f, 0x27, 0xe7, 0x8b, 0x58, + 0x01, 0x5a, 0xff, 0x5a, 0xbd, 0xf6, 0x6f, 0xa1, 0x73, 0xff, 0xda, 0xdf, 0x18, 0x80, 0x71, 0xa3, + 0xb2, 0x76, 0xd8, 0xc3, 0x9e, 0x7b, 0x99, 0x1e, 0x40, 0x4e, 0x44, 0xc4, 0xfa, 0xb3, 0xbb, 0x2f, + 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x84, 0x52, 0xc3, 0x09, 0x99, 0x7a, 0x91, + 0x88, 0x75, 0xd7, 0x0b, 0xb3, 0xf8, 0xf6, 0xe6, 0x84, 0x91, 0x76, 0xea, 0x73, 0xde, 0x31, 0x4b, + 0x7a, 0x52, 0x52, 0xf9, 0x4a, 0x3a, 0x21, 0xaa, 0x46, 0x50, 0x21, 0x6c, 0x0f, 0x73, 0x1c, 0x7a, + 0x89, 0x6d, 0xad, 0x74, 0x56, 0x2c, 0x50, 0x69, 0x94, 0x4d, 0xb3, 0x7e, 0x43, 0x22, 0x56, 0x38, + 0x6c, 0x50, 0xc6, 0x17, 0xa8, 0x81, 0x0e, 0x17, 0xa8, 0xa7, 0x60, 0x90, 0xfd, 0x50, 0x33, 0x40, + 0x8d, 0xc6, 0x0a, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0xa1, 0xde, 0x26, 0x0c, 0xbd, 0xa2, 0x89, + 0x49, 0xcd, 0x9c, 0x15, 0x86, 0xf8, 0x2e, 0x27, 0xa6, 0x3c, 0x96, 0x38, 0xf4, 0xb3, 0x16, 0x20, + 0xa7, 0x41, 0xaf, 0xb6, 0x14, 0xac, 0x6e, 0x22, 0xc0, 0x44, 0xed, 0x57, 0xba, 0x76, 0x7b, 0x3b, + 0x9c, 0x9d, 0x4b, 0x95, 0xe6, 0x6a, 0xcd, 0x97, 0x45, 0x13, 0x51, 0x9a, 0x40, 0x3f, 0x8c, 0xae, + 0xb9, 0x61, 0xf4, 0xb9, 0xff, 0x98, 0x38, 0x9c, 0x32, 0x9a, 0x84, 0x6e, 0xe8, 0x37, 0xa5, 0xe1, + 0x43, 0xde, 0x94, 0x46, 0xf3, 0x6e, 0x49, 0x33, 0x6d, 0x78, 0x28, 0xe7, 0x0b, 0x32, 0x94, 0xa5, + 0x8b, 0xba, 0xb2, 0xb4, 0x8b, 0x8a, 0x6d, 0x56, 0xd6, 0x31, 0xfb, 0x7a, 0xdb, 0xf1, 0x22, 0x37, + 0xda, 0xd3, 0x95, 0xab, 0xef, 0x85, 0xb1, 0x45, 0x87, 0x34, 0x7d, 0x6f, 0xc9, 0xab, 0xb7, 0x7c, + 0xd7, 0x8b, 0xd0, 0x34, 0xf4, 0x31, 0xe1, 0x83, 0x6f, 0xbd, 0x7d, 0xb4, 0xf7, 0x30, 0x83, 0xd8, + 0x5b, 0x70, 0x72, 0xd1, 0xbf, 0xed, 0xdd, 0x76, 0x82, 0xfa, 0x5c, 0x65, 0x45, 0x53, 0xfe, 0xac, + 0x49, 0xe5, 0x83, 0x95, 0x7f, 0xb5, 0xd3, 0x4a, 0xf2, 0xeb, 0xd0, 0xb2, 0xdb, 0x20, 0x39, 0x2a, + 0xba, 0xbf, 0x56, 0x30, 0x6a, 0x8a, 0xe9, 0x95, 0x91, 0xd8, 0xca, 0x35, 0x12, 0xbf, 0x0e, 0x43, + 0x9b, 0x2e, 0x69, 0xd4, 0x31, 0xd9, 0x14, 0xbd, 0xf3, 0x64, 0xbe, 0x1b, 0xd9, 0x32, 0xa5, 0x94, + 0x2a, 0x59, 0xae, 0xba, 0x58, 0x16, 0x85, 0xb1, 0x62, 0x83, 0x76, 0x60, 0x42, 0xf6, 0xa1, 0xc4, + 0x8a, 0xfd, 0xe0, 0xa9, 0x4e, 0x03, 0x6f, 0x32, 0x3f, 0x71, 0x6f, 0xbf, 0x3c, 0x81, 0x13, 0x6c, + 0x70, 0x8a, 0x31, 0x7a, 0x04, 0xfa, 0x9a, 0xf4, 0xe4, 0xeb, 0x63, 0xdd, 0xcf, 0x74, 0x15, 0x4c, + 0xed, 0xc2, 0xa0, 0xf6, 0x8f, 0x5b, 0xf0, 0x50, 0xaa, 0x67, 0x84, 0xfa, 0xe9, 0x88, 0x47, 0x21, + 0xa9, 0x0e, 0x2a, 0x74, 0x57, 0x07, 0xd9, 0x7f, 0xdb, 0x82, 0x13, 0x4b, 0xcd, 0x56, 0xb4, 0xb7, + 0xe8, 0x9a, 0x16, 0xdd, 0x0f, 0xc0, 0x40, 0x93, 0xd4, 0xdd, 0x76, 0x53, 0x8c, 0x5c, 0x59, 0x9e, + 0x0e, 0xab, 0x0c, 0x7a, 0xb0, 0x5f, 0x1e, 0xad, 0x46, 0x7e, 0xe0, 0x6c, 0x11, 0x0e, 0xc0, 0x82, + 0x9c, 0x9d, 0xb1, 0xee, 0x5d, 0x72, 0xcd, 0x6d, 0xba, 0xd1, 0xfd, 0xcd, 0x76, 0x61, 0x8c, 0x95, + 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0xba, 0x05, 0xe3, 0x72, 0xde, 0xcf, 0xd5, 0xeb, 0x01, 0x09, 0x43, + 0x34, 0x03, 0x05, 0xb7, 0x25, 0x5a, 0x09, 0xa2, 0x95, 0x85, 0x95, 0x0a, 0x2e, 0xb8, 0x2d, 0x29, + 0xce, 0xb3, 0x03, 0xa8, 0x68, 0xda, 0xa5, 0xaf, 0x08, 0x38, 0x56, 0x14, 0xe8, 0x02, 0x0c, 0x79, + 0x7e, 0x9d, 0x4b, 0xc4, 0x5c, 0x94, 0x60, 0x13, 0x6c, 0x4d, 0xc0, 0xb0, 0xc2, 0xa2, 
0x0a, 0x94, + 0xb8, 0xd7, 0x62, 0x3c, 0x69, 0x7b, 0xf2, 0x7d, 0x64, 0x5f, 0xb6, 0x2e, 0x4b, 0xe2, 0x98, 0x89, + 0xfd, 0xeb, 0x16, 0x8c, 0xc8, 0x2f, 0xeb, 0xf1, 0xae, 0x42, 0x97, 0x56, 0x7c, 0x4f, 0x89, 0x97, + 0x16, 0xbd, 0x6b, 0x30, 0x8c, 0x71, 0xc5, 0x28, 0x1e, 0xea, 0x8a, 0x71, 0x09, 0x86, 0x9d, 0x56, + 0xab, 0x62, 0xde, 0x4f, 0xd8, 0x54, 0x9a, 0x8b, 0xc1, 0x58, 0xa7, 0xb1, 0x7f, 0xac, 0x00, 0x63, + 0xf2, 0x0b, 0xaa, 0xed, 0x8d, 0x90, 0x44, 0x68, 0x1d, 0x4a, 0x0e, 0x1f, 0x25, 0x22, 0x27, 0xf9, + 0x63, 0xd9, 0x4a, 0x2e, 0x63, 0x48, 0x63, 0x41, 0x6b, 0x4e, 0x96, 0xc6, 0x31, 0x23, 0xd4, 0x80, + 0x49, 0xcf, 0x8f, 0xd8, 0xa1, 0xab, 0xf0, 0x9d, 0xec, 0x8e, 0x49, 0xee, 0xa7, 0x05, 0xf7, 0xc9, + 0xb5, 0x24, 0x17, 0x9c, 0x66, 0x8c, 0x96, 0xa4, 0xe2, 0xb0, 0x98, 0xaf, 0x44, 0xd2, 0x07, 0x2e, + 0x5b, 0x6f, 0x68, 0xff, 0x8a, 0x05, 0x25, 0x49, 0x76, 0x1c, 0x26, 0xe6, 0x55, 0x18, 0x0c, 0xd9, + 0x20, 0xc8, 0xae, 0xb1, 0x3b, 0x35, 0x9c, 0x8f, 0x57, 0x2c, 0x4b, 0xf0, 0xff, 0x21, 0x96, 0x3c, + 0x98, 0xdd, 0x48, 0x35, 0xff, 0x1d, 0x62, 0x37, 0x52, 0xed, 0xc9, 0x39, 0x94, 0xfe, 0x88, 0xb5, + 0x59, 0x53, 0xc4, 0x52, 0x91, 0xb7, 0x15, 0x90, 0x4d, 0xf7, 0x4e, 0x52, 0xe4, 0xad, 0x30, 0x28, + 0x16, 0x58, 0xf4, 0x26, 0x8c, 0xd4, 0xa4, 0xc1, 0x20, 0x5e, 0xe1, 0xe7, 0x3b, 0x1a, 0xaf, 0x94, + 0x9d, 0x93, 0xeb, 0xd0, 0x16, 0xb4, 0xf2, 0xd8, 0xe0, 0x66, 0x7a, 0xe5, 0x14, 0xbb, 0x79, 0xe5, + 0xc4, 0x7c, 0xf3, 0x7d, 0x54, 0x7e, 0xc2, 0x82, 0x01, 0xae, 0x28, 0xee, 0x4d, 0x4f, 0xaf, 0x99, + 0x7d, 0xe3, 0xbe, 0xbb, 0x49, 0x81, 0x42, 0xd2, 0x40, 0xab, 0x50, 0x62, 0x3f, 0x98, 0xa2, 0xbb, + 0x98, 0xff, 0x68, 0x86, 0xd7, 0xaa, 0x37, 0xf0, 0xa6, 0x2c, 0x86, 0x63, 0x0e, 0xf6, 0x8f, 0x16, + 0xe9, 0xee, 0x16, 0x93, 0x1a, 0x87, 0xbe, 0xf5, 0xe0, 0x0e, 0xfd, 0xc2, 0x83, 0x3a, 0xf4, 0xb7, + 0x60, 0xbc, 0xa6, 0x19, 0x89, 0xe3, 0x91, 0xbc, 0xd0, 0x71, 0x92, 0x68, 0xf6, 0x64, 0xae, 0x9d, + 0x5b, 0x30, 0x99, 0xe0, 0x24, 0x57, 0xf4, 0x71, 0x18, 0xe1, 0xe3, 0x2c, 0x6a, 0xe1, 0x8e, 0x4d, + 0x4f, 0xe4, 0xcf, 0x17, 0xbd, 0x0a, 0xae, 0xcd, 0xd5, 0x8a, 0x63, 0x83, 0x99, 0xfd, 0xa7, 0x16, + 0xa0, 0xa5, 0xd6, 0x36, 0x69, 0x92, 0xc0, 0x69, 0xc4, 0xb6, 0x9e, 0x1f, 0xb2, 0x60, 0x9a, 0xa4, + 0xc0, 0x0b, 0x7e, 0xb3, 0x29, 0x2e, 0x8b, 0x39, 0xfa, 0x8c, 0xa5, 0x9c, 0x32, 0xea, 0x55, 0xd1, + 0x74, 0x1e, 0x05, 0xce, 0xad, 0x0f, 0xad, 0xc2, 0x14, 0x3f, 0x25, 0x15, 0x42, 0x73, 0x92, 0x7a, + 0x58, 0x30, 0x9e, 0x5a, 0x4f, 0x93, 0xe0, 0xac, 0x72, 0xf6, 0x37, 0x47, 0x20, 0xb7, 0x15, 0xef, + 0x1a, 0xb9, 0xde, 0x35, 0x72, 0xbd, 0x6b, 0xe4, 0x7a, 0xd7, 0xc8, 0xf5, 0xae, 0x91, 0xeb, 0x5d, + 0x23, 0xd7, 0x51, 0x18, 0xb9, 0xfe, 0x8a, 0x05, 0x27, 0xd5, 0x59, 0x63, 0xdc, 0xae, 0x3f, 0x03, + 0x53, 0x7c, 0xb9, 0x19, 0xde, 0xbb, 0xe2, 0x6c, 0xbd, 0x94, 0x39, 0x73, 0x13, 0x5e, 0xe6, 0x46, + 0x41, 0xfe, 0x5c, 0x27, 0x03, 0x81, 0xb3, 0xaa, 0xb1, 0x7f, 0x71, 0x08, 0xfa, 0x97, 0x76, 0x89, + 0x17, 0x1d, 0xc3, 0x3d, 0xa4, 0x06, 0x63, 0xae, 0xb7, 0xeb, 0x37, 0x76, 0x49, 0x9d, 0xe3, 0x0f, + 0x73, 0x5d, 0x3e, 0x25, 0x58, 0x8f, 0xad, 0x18, 0x2c, 0x70, 0x82, 0xe5, 0x83, 0x30, 0x15, 0x5c, + 0x86, 0x01, 0x7e, 0x52, 0x08, 0x3b, 0x41, 0xe6, 0x9e, 0xcd, 0x3a, 0x51, 0x9c, 0x7f, 0xb1, 0x19, + 0x83, 0x9f, 0x44, 0xa2, 0x38, 0xfa, 0x34, 0x8c, 0x6d, 0xba, 0x41, 0x18, 0xad, 0xbb, 0x4d, 0x12, + 0x46, 0x4e, 0xb3, 0x75, 0x1f, 0xa6, 0x01, 0xd5, 0x0f, 0xcb, 0x06, 0x27, 0x9c, 0xe0, 0x8c, 0xb6, + 0x60, 0xb4, 0xe1, 0xe8, 0x55, 0x0d, 0x1e, 0xba, 0x2a, 0x75, 0x3a, 0x5c, 0xd3, 0x19, 0x61, 0x93, + 0x2f, 0x5d, 0x4e, 0x35, 0xa6, 0xdd, 0x1e, 0x62, 0xba, 0x07, 0xb5, 0x9c, 0xb8, 0x5a, 0x9b, 0xe3, + 0xa8, 0x34, 
0xc5, 0x5c, 0xc4, 0x4b, 0xa6, 0x34, 0xa5, 0x39, 0x82, 0x7f, 0x0a, 0x4a, 0x84, 0x76, + 0x21, 0x65, 0x2c, 0x0e, 0x98, 0x8b, 0xbd, 0xb5, 0x75, 0xd5, 0xad, 0x05, 0xbe, 0x69, 0x94, 0x59, + 0x92, 0x9c, 0x70, 0xcc, 0x14, 0x2d, 0xc0, 0x40, 0x48, 0x02, 0x57, 0x29, 0x7e, 0x3b, 0x0c, 0x23, + 0x23, 0xe3, 0xef, 0xc1, 0xf8, 0x6f, 0x2c, 0x8a, 0xd2, 0xe9, 0xe5, 0x30, 0xbd, 0x29, 0x3b, 0x0c, + 0xb4, 0xe9, 0x35, 0xc7, 0xa0, 0x58, 0x60, 0xd1, 0x6b, 0x30, 0x18, 0x90, 0x06, 0xb3, 0xfa, 0x8d, + 0xf6, 0x3e, 0xc9, 0xb9, 0x11, 0x91, 0x97, 0xc3, 0x92, 0x01, 0xba, 0x0a, 0x28, 0x20, 0x54, 0x1a, + 0x73, 0xbd, 0x2d, 0xe5, 0x38, 0x2d, 0x36, 0x5a, 0x25, 0xf5, 0xe2, 0x98, 0x42, 0x3e, 0x05, 0xc4, + 0x19, 0xc5, 0xd0, 0x65, 0x98, 0x54, 0xd0, 0x15, 0x2f, 0x8c, 0x1c, 0xba, 0xc1, 0x8d, 0x33, 0x5e, + 0x4a, 0x19, 0x82, 0x93, 0x04, 0x38, 0x5d, 0xc6, 0xfe, 0x92, 0x05, 0xbc, 0x9f, 0x8f, 0x41, 0x05, + 0xf0, 0xaa, 0xa9, 0x02, 0x38, 0x9d, 0x3b, 0x72, 0x39, 0xd7, 0xff, 0x2f, 0x59, 0x30, 0xac, 0x8d, + 0x6c, 0x3c, 0x67, 0xad, 0x0e, 0x73, 0xb6, 0x0d, 0x13, 0x74, 0xa6, 0x5f, 0xdf, 0x08, 0x49, 0xb0, + 0x4b, 0xea, 0x6c, 0x62, 0x16, 0xee, 0x6f, 0x62, 0x2a, 0x27, 0xcd, 0x6b, 0x09, 0x86, 0x38, 0x55, + 0x85, 0xfd, 0x29, 0xd9, 0x54, 0xe5, 0xd3, 0x5a, 0x53, 0x63, 0x9e, 0xf0, 0x69, 0x55, 0xa3, 0x8a, + 0x63, 0x1a, 0xba, 0xd4, 0xb6, 0xfd, 0x30, 0x4a, 0xfa, 0xb4, 0x5e, 0xf1, 0xc3, 0x08, 0x33, 0x8c, + 0xfd, 0x3c, 0xc0, 0xd2, 0x1d, 0x52, 0xe3, 0x33, 0x56, 0xbf, 0xa1, 0x58, 0xf9, 0x37, 0x14, 0xfb, + 0x77, 0x2c, 0x18, 0x5b, 0x5e, 0x30, 0x4e, 0xae, 0x59, 0x00, 0x7e, 0xad, 0xba, 0x75, 0x6b, 0x4d, + 0xfa, 0x6a, 0x70, 0x73, 0xb5, 0x82, 0x62, 0x8d, 0x02, 0x9d, 0x86, 0x62, 0xa3, 0xed, 0x09, 0x1d, + 0xe5, 0x20, 0x3d, 0x1e, 0xaf, 0xb5, 0x3d, 0x4c, 0x61, 0xda, 0x33, 0xa0, 0x62, 0xcf, 0xcf, 0x80, + 0xba, 0x86, 0xff, 0x40, 0x65, 0xe8, 0xbf, 0x7d, 0xdb, 0xad, 0xf3, 0x47, 0xd6, 0xc2, 0x8f, 0xe4, + 0xd6, 0xad, 0x95, 0xc5, 0x10, 0x73, 0xb8, 0xfd, 0x85, 0x22, 0xcc, 0x2c, 0x37, 0xc8, 0x9d, 0xb7, + 0xf9, 0xd0, 0xbc, 0xd7, 0x47, 0x4c, 0x87, 0xd3, 0xf6, 0x1c, 0xf6, 0xa1, 0x5a, 0xf7, 0xfe, 0xd8, + 0x84, 0x41, 0xee, 0xd2, 0x29, 0x9f, 0x9d, 0x67, 0xda, 0xe6, 0xf2, 0x3b, 0x64, 0x96, 0xbb, 0x86, + 0x0a, 0xdb, 0x9c, 0x3a, 0x30, 0x05, 0x14, 0x4b, 0xe6, 0x33, 0x2f, 0xc3, 0x88, 0x4e, 0x79, 0xa8, + 0x27, 0xa3, 0xdf, 0x53, 0x84, 0x09, 0xda, 0x82, 0x07, 0x3a, 0x10, 0x37, 0xd2, 0x03, 0x71, 0xd4, + 0xcf, 0x06, 0xbb, 0x8f, 0xc6, 0x9b, 0xc9, 0xd1, 0xb8, 0x94, 0x37, 0x1a, 0xc7, 0x3d, 0x06, 0xdf, + 0x6b, 0xc1, 0xd4, 0x72, 0xc3, 0xaf, 0xed, 0x24, 0x9e, 0xf6, 0xbd, 0x08, 0xc3, 0x74, 0x3b, 0x0e, + 0x8d, 0x28, 0x17, 0x46, 0xdc, 0x13, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, 0xc6, 0x8d, 0x95, 0xc5, + 0xac, 0x70, 0x29, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0xb7, 0x2c, 0x38, 0x73, 0x79, 0x61, 0x29, 0x9e, + 0x8a, 0xa9, 0x88, 0x2d, 0xe7, 0x61, 0xa0, 0x55, 0xd7, 0x9a, 0x12, 0xeb, 0x70, 0x17, 0x59, 0x2b, + 0x04, 0xf6, 0x9d, 0x12, 0x8d, 0xe8, 0x06, 0xc0, 0x65, 0x5c, 0x59, 0x10, 0xfb, 0xae, 0x34, 0xd9, + 0x58, 0xb9, 0x26, 0x9b, 0x27, 0x60, 0x90, 0x9e, 0x0b, 0x6e, 0x4d, 0xb6, 0x9b, 0x5b, 0xdf, 0x39, + 0x08, 0x4b, 0x9c, 0xfd, 0x73, 0x16, 0x4c, 0x5d, 0x76, 0x23, 0x7a, 0x68, 0x27, 0x43, 0x92, 0xd0, + 0x53, 0x3b, 0x74, 0x23, 0x3f, 0xd8, 0x4b, 0x86, 0x24, 0xc1, 0x0a, 0x83, 0x35, 0x2a, 0xfe, 0x41, + 0xbb, 0x2e, 0x7b, 0xa3, 0x50, 0x30, 0x8d, 0x64, 0x58, 0xc0, 0xb1, 0xa2, 0xa0, 0xfd, 0x55, 0x77, + 0x03, 0xa6, 0x5f, 0xdc, 0x13, 0x1b, 0xb7, 0xea, 0xaf, 0x45, 0x89, 0xc0, 0x31, 0x8d, 0xfd, 0xc7, + 0x16, 0x94, 0x2f, 0x37, 0xda, 0x61, 0x44, 0x82, 0xcd, 0x30, 0x67, 0xd3, 0x7d, 0x1e, 0x4a, 0x44, + 0x6a, 0xf3, 0xe5, 0x63, 0x4a, 0x29, 
0x88, 0x2a, 0x35, 0x3f, 0x8f, 0x8c, 0xa2, 0xe8, 0x7a, 0x78, + 0x7f, 0x7c, 0xb8, 0x07, 0xa4, 0xcb, 0x80, 0x88, 0x5e, 0x97, 0x1e, 0x2a, 0x86, 0xc5, 0x9c, 0x58, + 0x4a, 0x61, 0x71, 0x46, 0x09, 0xfb, 0xc7, 0x2d, 0x38, 0xa9, 0x3e, 0xf8, 0x1d, 0xf7, 0x99, 0xf6, + 0x57, 0x0b, 0x30, 0x7a, 0x65, 0x7d, 0xbd, 0x72, 0x99, 0x44, 0xda, 0xac, 0xec, 0x6c, 0xa3, 0xc7, + 0x9a, 0xa9, 0xb1, 0xd3, 0x1d, 0xb1, 0x1d, 0xb9, 0x8d, 0x59, 0x1e, 0x71, 0x6c, 0x76, 0xc5, 0x8b, + 0xae, 0x07, 0xd5, 0x28, 0x70, 0xbd, 0xad, 0xcc, 0x99, 0x2e, 0x65, 0x96, 0x62, 0x9e, 0xcc, 0x82, + 0x9e, 0x87, 0x01, 0x16, 0xf2, 0x4c, 0x0e, 0xc2, 0xc3, 0xea, 0x8a, 0xc5, 0xa0, 0x07, 0xfb, 0xe5, + 0xd2, 0x0d, 0xbc, 0xc2, 0xff, 0x60, 0x41, 0x8a, 0x6e, 0xc0, 0xf0, 0x76, 0x14, 0xb5, 0xae, 0x10, + 0xa7, 0x4e, 0x02, 0xb9, 0xcb, 0x9e, 0xcd, 0xda, 0x65, 0x69, 0x27, 0x70, 0xb2, 0x78, 0x63, 0x8a, + 0x61, 0x21, 0xd6, 0xf9, 0xd8, 0x55, 0x80, 0x18, 0x77, 0x44, 0x56, 0x16, 0x7b, 0x1d, 0x4a, 0xf4, + 0x73, 0xe7, 0x1a, 0xae, 0xd3, 0xd9, 0x8e, 0xfd, 0x34, 0x94, 0xa4, 0x95, 0x3a, 0x14, 0xf1, 0x11, + 0xd8, 0x89, 0x24, 0x8d, 0xd8, 0x21, 0x8e, 0xf1, 0xf6, 0x26, 0x9c, 0x60, 0xbe, 0xaa, 0x4e, 0xb4, + 0x6d, 0xcc, 0xbe, 0xee, 0xc3, 0xfc, 0x8c, 0xb8, 0xb1, 0xf1, 0x36, 0x4f, 0x6b, 0x0f, 0x7a, 0x47, + 0x24, 0xc7, 0xf8, 0xf6, 0x66, 0x7f, 0xb3, 0x0f, 0x1e, 0x5e, 0xa9, 0xe6, 0x87, 0xec, 0x79, 0x09, + 0x46, 0xb8, 0x20, 0x48, 0x07, 0xdd, 0x69, 0x88, 0x7a, 0x95, 0x6e, 0x73, 0x5d, 0xc3, 0x61, 0x83, + 0x12, 0x9d, 0x81, 0xa2, 0xfb, 0x96, 0x97, 0x7c, 0xee, 0xb6, 0xf2, 0xfa, 0x1a, 0xa6, 0x70, 0x8a, + 0xa6, 0x32, 0x25, 0xdf, 0xac, 0x15, 0x5a, 0xc9, 0x95, 0xaf, 0xc2, 0x98, 0x1b, 0xd6, 0x42, 0x77, + 0xc5, 0xa3, 0x2b, 0x50, 0x5b, 0xc3, 0x4a, 0x9b, 0x40, 0x1b, 0xad, 0xb0, 0x38, 0x41, 0xad, 0x9d, + 0x1c, 0xfd, 0x3d, 0xcb, 0xa5, 0x5d, 0x03, 0x06, 0xd0, 0x8d, 0xbd, 0xc5, 0xbe, 0x2e, 0x64, 0x9a, + 0x70, 0xb1, 0xb1, 0xf3, 0x0f, 0x0e, 0xb1, 0xc4, 0xd1, 0xab, 0x5a, 0x6d, 0xdb, 0x69, 0xcd, 0xb5, + 0xa3, 0xed, 0x45, 0x37, 0xac, 0xf9, 0xbb, 0x24, 0xd8, 0x63, 0xb7, 0xec, 0xa1, 0xf8, 0xaa, 0xa6, + 0x10, 0x0b, 0x57, 0xe6, 0x2a, 0x94, 0x12, 0xa7, 0xcb, 0xa0, 0x39, 0x18, 0x97, 0xc0, 0x2a, 0x09, + 0xd9, 0xe6, 0x3e, 0xcc, 0xd8, 0xa8, 0x07, 0x68, 0x02, 0xac, 0x98, 0x24, 0xe9, 0x4d, 0xd1, 0x15, + 0x8e, 0x42, 0x74, 0xfd, 0x00, 0x8c, 0xba, 0x9e, 0x1b, 0xb9, 0x4e, 0xe4, 0x73, 0x33, 0x0e, 0xbf, + 0x50, 0x33, 0xd5, 0xf1, 0x8a, 0x8e, 0xc0, 0x26, 0x9d, 0xfd, 0x9f, 0xfa, 0x60, 0x92, 0x0d, 0xdb, + 0xbb, 0x33, 0xec, 0x3b, 0x69, 0x86, 0xdd, 0x48, 0xcf, 0xb0, 0xa3, 0x90, 0xc9, 0xef, 0x7b, 0x9a, + 0x7d, 0x1a, 0x4a, 0xea, 0xcd, 0x9d, 0x7c, 0x74, 0x6b, 0xe5, 0x3c, 0xba, 0xed, 0x7e, 0x2e, 0x4b, + 0xcf, 0xb0, 0x62, 0xa6, 0x67, 0xd8, 0x97, 0x2d, 0x88, 0x4d, 0x06, 0xe8, 0x75, 0x28, 0xb5, 0x7c, + 0xe6, 0x68, 0x1a, 0x48, 0xef, 0xed, 0xc7, 0x3b, 0xda, 0x1c, 0x78, 0xd4, 0xb2, 0x80, 0xf7, 0x42, + 0x45, 0x16, 0xc5, 0x31, 0x17, 0x74, 0x15, 0x06, 0x5b, 0x01, 0xa9, 0x46, 0x2c, 0xa4, 0x4e, 0xef, + 0x0c, 0xf9, 0xac, 0xe1, 0x05, 0xb1, 0xe4, 0x60, 0xff, 0x67, 0x0b, 0x26, 0x92, 0xa4, 0xe8, 0x43, + 0xd0, 0x47, 0xee, 0x90, 0x9a, 0x68, 0x6f, 0xe6, 0x21, 0x1b, 0x2b, 0x1d, 0x78, 0x07, 0xd0, 0xff, + 0x98, 0x95, 0x42, 0x57, 0x60, 0x90, 0x9e, 0xb0, 0x97, 0x55, 0xf8, 0xb8, 0x47, 0xf3, 0x4e, 0x69, + 0x25, 0xaa, 0xf0, 0xc6, 0x09, 0x10, 0x96, 0xc5, 0x99, 0x3b, 0x56, 0xad, 0x55, 0xa5, 0x97, 0x97, + 0xa8, 0xd3, 0x1d, 0x7b, 0x7d, 0xa1, 0xc2, 0x89, 0x04, 0x37, 0xee, 0x8e, 0x25, 0x81, 0x38, 0x66, + 0x62, 0xff, 0xbc, 0x05, 0xc0, 0xbd, 0xcf, 0x1c, 0x6f, 0x8b, 0x1c, 0x83, 0x9e, 0x7c, 0x11, 0xfa, + 0xc2, 0x16, 0xa9, 0x75, 0xf2, 0x81, 0x8e, 0xdb, 0x53, 0x6d, 
0x91, 0x5a, 0x3c, 0xe3, 0xe8, 0x3f, + 0xcc, 0x4a, 0xdb, 0xdf, 0x07, 0x30, 0x16, 0x93, 0xad, 0x44, 0xa4, 0x89, 0x9e, 0x35, 0x02, 0x75, + 0x9c, 0x4e, 0x04, 0xea, 0x28, 0x31, 0x6a, 0x4d, 0x25, 0xfb, 0x69, 0x28, 0x36, 0x9d, 0x3b, 0x42, + 0xe7, 0xf6, 0x74, 0xe7, 0x66, 0x50, 0xfe, 0xb3, 0xab, 0xce, 0x1d, 0x7e, 0x2d, 0x7d, 0x5a, 0xae, + 0x90, 0x55, 0xe7, 0x4e, 0x57, 0x3f, 0x5d, 0x5a, 0x09, 0xab, 0xcb, 0xf5, 0x84, 0x63, 0x55, 0x4f, + 0x75, 0xb9, 0x5e, 0xb2, 0x2e, 0xd7, 0xeb, 0xa1, 0x2e, 0xd7, 0x43, 0x77, 0x61, 0x50, 0xf8, 0x3d, + 0x8a, 0x50, 0x5e, 0x17, 0x7b, 0xa8, 0x4f, 0xb8, 0x4d, 0xf2, 0x3a, 0x2f, 0xca, 0x6b, 0xb7, 0x80, + 0x76, 0xad, 0x57, 0x56, 0x88, 0xfe, 0xaa, 0x05, 0x63, 0xe2, 0x37, 0x26, 0x6f, 0xb5, 0x49, 0x18, + 0x09, 0xb1, 0xf4, 0xfd, 0xbd, 0xb7, 0x41, 0x14, 0xe4, 0x4d, 0x79, 0xbf, 0x3c, 0x67, 0x4c, 0x64, + 0xd7, 0x16, 0x25, 0x5a, 0x81, 0xfe, 0xae, 0x05, 0x27, 0x9a, 0xce, 0x1d, 0x5e, 0x23, 0x87, 0x61, + 0x27, 0x72, 0x7d, 0xe1, 0x3f, 0xf0, 0xa1, 0xde, 0x86, 0x3f, 0x55, 0x9c, 0x37, 0x52, 0xda, 0x1f, + 0x4f, 0x64, 0x91, 0x74, 0x6d, 0x6a, 0x66, 0xbb, 0x66, 0x36, 0x61, 0x48, 0xce, 0xb7, 0x07, 0xe9, + 0x64, 0xcd, 0xea, 0x11, 0x73, 0xed, 0x81, 0xd6, 0xf3, 0x69, 0x18, 0xd1, 0xe7, 0xd8, 0x03, 0xad, + 0xeb, 0x2d, 0x98, 0xca, 0x98, 0x4b, 0x0f, 0xb4, 0xca, 0xdb, 0x70, 0x3a, 0x77, 0x7e, 0x3c, 0x50, + 0x27, 0xf9, 0xaf, 0x5a, 0xfa, 0x3e, 0x78, 0x0c, 0xc6, 0x8a, 0x05, 0xd3, 0x58, 0x71, 0xb6, 0xf3, + 0xca, 0xc9, 0xb1, 0x58, 0xbc, 0xa9, 0x37, 0x9a, 0xee, 0xea, 0xe8, 0x35, 0x18, 0x68, 0x50, 0x88, + 0xf4, 0x9e, 0xb5, 0xbb, 0xaf, 0xc8, 0x58, 0x98, 0x64, 0xf0, 0x10, 0x0b, 0x0e, 0xf6, 0x2f, 0x59, + 0xd0, 0x77, 0x0c, 0x3d, 0x81, 0xcd, 0x9e, 0x78, 0x36, 0x97, 0xb5, 0x88, 0x6a, 0x3e, 0x8b, 0x9d, + 0xdb, 0x4b, 0x77, 0x22, 0xe2, 0x85, 0xec, 0x44, 0xce, 0xec, 0x98, 0x9f, 0xb6, 0x60, 0xea, 0x9a, + 0xef, 0xd4, 0xe7, 0x9d, 0x86, 0xe3, 0xd5, 0x48, 0xb0, 0xe2, 0x6d, 0x1d, 0xca, 0xf5, 0xbb, 0xd0, + 0xd5, 0xf5, 0x7b, 0x41, 0x7a, 0x4e, 0xf5, 0xe5, 0x8f, 0x1f, 0x95, 0xa4, 0x93, 0xa1, 0x8b, 0x0c, + 0x1f, 0xdf, 0x6d, 0x40, 0x7a, 0x2b, 0xc5, 0x03, 0x28, 0x0c, 0x83, 0x2e, 0x6f, 0xaf, 0x18, 0xc4, + 0x27, 0xb3, 0x25, 0xdc, 0xd4, 0xe7, 0x69, 0x4f, 0x7b, 0x38, 0x00, 0x4b, 0x46, 0xf6, 0x4b, 0x90, + 0x19, 0x6a, 0xa2, 0xbb, 0x5e, 0xc2, 0xfe, 0x18, 0x4c, 0xb2, 0x92, 0x87, 0xd4, 0x0c, 0xd8, 0x09, + 0x6d, 0x6a, 0x46, 0xd8, 0x4c, 0xfb, 0xf3, 0x16, 0x8c, 0xaf, 0x25, 0xa2, 0x09, 0x9e, 0x67, 0xf6, + 0xd7, 0x0c, 0x25, 0x7e, 0x95, 0x41, 0xb1, 0xc0, 0x1e, 0xb9, 0x92, 0xeb, 0xcf, 0x2d, 0x88, 0xa3, + 0xbf, 0x1c, 0x83, 0xf8, 0xb6, 0x60, 0x88, 0x6f, 0x99, 0x82, 0xac, 0x6a, 0x4e, 0x9e, 0xf4, 0x86, + 0xae, 0xaa, 0xb8, 0x68, 0x1d, 0x64, 0xd8, 0x98, 0x0d, 0x9f, 0x8a, 0x63, 0x66, 0xf0, 0x34, 0x19, + 0x29, 0xcd, 0xfe, 0xdd, 0x02, 0x20, 0x45, 0xdb, 0x73, 0xdc, 0xb6, 0x74, 0x89, 0xa3, 0x89, 0xdb, + 0xb6, 0x0b, 0x88, 0x79, 0x10, 0x04, 0x8e, 0x17, 0x72, 0xb6, 0xae, 0x50, 0xeb, 0x1d, 0xce, 0x3d, + 0x61, 0x46, 0xbe, 0x0d, 0xbb, 0x96, 0xe2, 0x86, 0x33, 0x6a, 0xd0, 0x3c, 0x43, 0xfa, 0x7b, 0xf5, + 0x0c, 0x19, 0xe8, 0xf2, 0xc8, 0xf1, 0x2b, 0x16, 0x8c, 0xaa, 0x6e, 0x7a, 0x87, 0xb8, 0xc2, 0xab, + 0xf6, 0xe4, 0x6c, 0xa0, 0x15, 0xad, 0xc9, 0xec, 0x60, 0xf9, 0x2e, 0xf6, 0x58, 0xd5, 0x69, 0xb8, + 0x77, 0x89, 0x8a, 0xf3, 0x59, 0x16, 0x8f, 0x4f, 0x05, 0xf4, 0x60, 0xbf, 0x3c, 0xaa, 0xfe, 0xf1, + 0x38, 0xe6, 0x71, 0x11, 0xba, 0x25, 0x8f, 0x27, 0xa6, 0x22, 0x7a, 0x11, 0xfa, 0x5b, 0xdb, 0x4e, + 0x48, 0x12, 0x4f, 0x86, 0xfa, 0x2b, 0x14, 0x78, 0xb0, 0x5f, 0x1e, 0x53, 0x05, 0x18, 0x04, 0x73, + 0xea, 0xde, 0xa3, 0xe1, 0xa5, 0x27, 0x67, 0xd7, 0x68, 0x78, 0x7f, 0x6a, 0x41, 0xdf, 
0x9a, 0x5f, + 0x3f, 0x8e, 0x2d, 0xe0, 0x55, 0x63, 0x0b, 0x78, 0x24, 0x2f, 0xc5, 0x44, 0xee, 0xea, 0x5f, 0x4e, + 0xac, 0xfe, 0xb3, 0xb9, 0x1c, 0x3a, 0x2f, 0xfc, 0x26, 0x0c, 0xb3, 0xc4, 0x15, 0xe2, 0x79, 0xd4, + 0xf3, 0xc6, 0x82, 0x2f, 0x27, 0x16, 0xfc, 0xb8, 0x46, 0xaa, 0xad, 0xf4, 0xa7, 0x60, 0x50, 0xbc, + 0xb7, 0x49, 0xbe, 0xf9, 0x15, 0xb4, 0x58, 0xe2, 0xed, 0x9f, 0x28, 0x82, 0x91, 0x28, 0x03, 0xfd, + 0x8a, 0x05, 0xb3, 0x01, 0xf7, 0xc3, 0xad, 0x2f, 0xb6, 0x03, 0xd7, 0xdb, 0xaa, 0xd6, 0xb6, 0x49, + 0xbd, 0xdd, 0x70, 0xbd, 0xad, 0x95, 0x2d, 0xcf, 0x57, 0xe0, 0xa5, 0x3b, 0xa4, 0xd6, 0x66, 0x66, + 0xb7, 0x2e, 0x59, 0x39, 0x94, 0x3f, 0xfb, 0x73, 0xf7, 0xf6, 0xcb, 0xb3, 0xf8, 0x50, 0xbc, 0xf1, + 0x21, 0xdb, 0x82, 0x7e, 0xcb, 0x82, 0x8b, 0x3c, 0x7f, 0x44, 0xef, 0xed, 0xef, 0x70, 0x5b, 0xae, + 0x48, 0x56, 0x31, 0x93, 0x75, 0x12, 0x34, 0xe7, 0x3f, 0x20, 0x3a, 0xf4, 0x62, 0xe5, 0x70, 0x75, + 0xe1, 0xc3, 0x36, 0xce, 0xfe, 0xc7, 0x45, 0x18, 0x15, 0x51, 0xd3, 0xc4, 0x19, 0xf0, 0xa2, 0x31, + 0x25, 0x1e, 0x4d, 0x4c, 0x89, 0x49, 0x83, 0xf8, 0x68, 0xb6, 0xff, 0x10, 0x26, 0xe9, 0xe6, 0x7c, + 0x85, 0x38, 0x41, 0xb4, 0x41, 0x1c, 0xee, 0xf0, 0x55, 0x3c, 0xf4, 0xee, 0xaf, 0xf4, 0x93, 0xd7, + 0x92, 0xcc, 0x70, 0x9a, 0xff, 0x77, 0xd2, 0x99, 0xe3, 0xc1, 0x44, 0x2a, 0xf0, 0xdd, 0x1b, 0x50, + 0x52, 0x8f, 0x45, 0xc4, 0xa6, 0xd3, 0x39, 0x7e, 0x64, 0x92, 0x03, 0x57, 0x7f, 0xc5, 0x0f, 0x95, + 0x62, 0x76, 0xf6, 0xdf, 0x2f, 0x18, 0x15, 0xf2, 0x41, 0x5c, 0x83, 0x21, 0x27, 0x0c, 0xdd, 0x2d, + 0x8f, 0xd4, 0x3b, 0x69, 0x28, 0x53, 0xd5, 0xb0, 0x07, 0x3b, 0x73, 0xa2, 0x24, 0x56, 0x3c, 0xd0, + 0x15, 0xee, 0x56, 0xb7, 0x4b, 0x3a, 0xa9, 0x27, 0x53, 0xdc, 0x40, 0x3a, 0xde, 0xed, 0x12, 0x2c, + 0xca, 0xa3, 0x4f, 0x70, 0xbf, 0xc7, 0xab, 0x9e, 0x7f, 0xdb, 0xbb, 0xec, 0xfb, 0x32, 0xe8, 0x46, + 0x6f, 0x0c, 0x27, 0xa5, 0xb7, 0xa3, 0x2a, 0x8e, 0x4d, 0x6e, 0xbd, 0x45, 0x92, 0xfd, 0x0c, 0xb0, + 0x78, 0xf9, 0xe6, 0xdb, 0xec, 0x10, 0x11, 0x18, 0x17, 0x21, 0xf9, 0x24, 0x4c, 0xf4, 0x5d, 0xe6, + 0x55, 0xce, 0x2c, 0x1d, 0x2b, 0xd2, 0xaf, 0x9a, 0x2c, 0x70, 0x92, 0xa7, 0xfd, 0xb3, 0x16, 0xb0, + 0x77, 0xaa, 0xc7, 0x20, 0x8f, 0x7c, 0xd8, 0x94, 0x47, 0xa6, 0xf3, 0x3a, 0x39, 0x47, 0x14, 0x79, + 0x81, 0xcf, 0xac, 0x4a, 0xe0, 0xdf, 0xd9, 0x13, 0xce, 0x2a, 0xdd, 0xef, 0x1f, 0xf6, 0xff, 0xb6, + 0xf8, 0x26, 0x16, 0xbf, 0xea, 0xff, 0x2c, 0x0c, 0xd5, 0x9c, 0x96, 0x53, 0xe3, 0x59, 0x9d, 0x72, + 0x35, 0x7a, 0x46, 0xa1, 0xd9, 0x05, 0x51, 0x82, 0x6b, 0xa8, 0x64, 0x68, 0xc7, 0x21, 0x09, 0xee, + 0xaa, 0x95, 0x52, 0x55, 0xce, 0xec, 0xc0, 0xa8, 0xc1, 0xec, 0x81, 0xaa, 0x33, 0x3e, 0xcb, 0x8f, + 0x58, 0x15, 0x8a, 0xb4, 0x09, 0x93, 0x9e, 0xf6, 0x9f, 0x1e, 0x28, 0xf2, 0x72, 0xf9, 0x78, 0xb7, + 0x43, 0x94, 0x9d, 0x3e, 0xda, 0x13, 0xd8, 0x04, 0x1b, 0x9c, 0xe6, 0x6c, 0xff, 0xa4, 0x05, 0x0f, + 0xe9, 0x84, 0xda, 0x2b, 0x9b, 0x6e, 0x46, 0x92, 0x45, 0x18, 0xf2, 0x5b, 0x24, 0x70, 0x22, 0x3f, + 0x10, 0xa7, 0xc6, 0x05, 0xd9, 0xe9, 0xd7, 0x05, 0xfc, 0x40, 0xe4, 0x28, 0x90, 0xdc, 0x25, 0x1c, + 0xab, 0x92, 0xf4, 0xf6, 0xc9, 0x3a, 0x23, 0x14, 0xef, 0xa9, 0xd8, 0x1e, 0xc0, 0x2c, 0xe9, 0x21, + 0x16, 0x18, 0xfb, 0x9b, 0x16, 0x9f, 0x58, 0x7a, 0xd3, 0xd1, 0x5b, 0x30, 0xd1, 0x74, 0xa2, 0xda, + 0xf6, 0xd2, 0x9d, 0x56, 0xc0, 0x4d, 0x4e, 0xb2, 0x9f, 0x9e, 0xee, 0xd6, 0x4f, 0xda, 0x47, 0xc6, + 0xae, 0x9c, 0xab, 0x09, 0x66, 0x38, 0xc5, 0x1e, 0x6d, 0xc0, 0x30, 0x83, 0xb1, 0xa7, 0x82, 0x61, + 0x27, 0xd1, 0x20, 0xaf, 0x36, 0xe5, 0x8c, 0xb0, 0x1a, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0xcb, 0x45, + 0xbe, 0xda, 0x99, 0x28, 0xff, 0x14, 0x0c, 0xb6, 0xfc, 0xfa, 0xc2, 0xca, 0x22, 0x16, 0xa3, 0xa0, + 0x8e, 0x91, 
0x0a, 0x07, 0x63, 0x89, 0x47, 0x17, 0x60, 0x48, 0xfc, 0x94, 0x26, 0x42, 0xb6, 0x37, + 0x0b, 0xba, 0x10, 0x2b, 0x2c, 0x7a, 0x0e, 0xa0, 0x15, 0xf8, 0xbb, 0x6e, 0x9d, 0x85, 0x0e, 0x29, + 0x9a, 0x7e, 0x44, 0x15, 0x85, 0xc1, 0x1a, 0x15, 0x7a, 0x05, 0x46, 0xdb, 0x5e, 0xc8, 0xc5, 0x11, + 0x2d, 0x9a, 0xb2, 0xf2, 0x70, 0xb9, 0xa1, 0x23, 0xb1, 0x49, 0x8b, 0xe6, 0x60, 0x20, 0x72, 0x98, + 0x5f, 0x4c, 0x7f, 0xbe, 0xbb, 0xef, 0x3a, 0xa5, 0xd0, 0x13, 0x08, 0xd1, 0x02, 0x58, 0x14, 0x44, + 0x6f, 0xc8, 0x57, 0xbb, 0x7c, 0x63, 0x17, 0x7e, 0xf6, 0xbd, 0x1d, 0x02, 0xda, 0x9b, 0x5d, 0xe1, + 0xbf, 0x6f, 0xf0, 0x42, 0x2f, 0x03, 0x90, 0x3b, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0xbc, 0xd9, 0x94, + 0x5c, 0xb0, 0xe8, 0xaf, 0xf9, 0xd1, 0x8d, 0x90, 0x2c, 0x29, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x55, + 0x02, 0x88, 0xe5, 0x76, 0x74, 0x37, 0xb5, 0x71, 0x3d, 0xd3, 0x59, 0xd2, 0x3f, 0xba, 0x5d, 0x0b, + 0x7d, 0xbf, 0x05, 0xc3, 0x22, 0x42, 0x0a, 0x1b, 0xa1, 0x42, 0xe7, 0x8d, 0xd3, 0x0c, 0xd4, 0x42, + 0x4b, 0xf0, 0x26, 0x3c, 0x2f, 0x67, 0xa8, 0x86, 0xe9, 0xda, 0x0a, 0xbd, 0x62, 0xf4, 0x3e, 0x79, + 0x55, 0x2c, 0x1a, 0x5d, 0xa9, 0xae, 0x8a, 0x25, 0x76, 0x46, 0xe8, 0xb7, 0xc4, 0x1b, 0xc6, 0x2d, + 0xb1, 0x2f, 0xff, 0x59, 0xa2, 0x21, 0xbe, 0x76, 0xbb, 0x20, 0xa2, 0x8a, 0x1e, 0xa2, 0xa0, 0x3f, + 0xff, 0x79, 0x9e, 0x76, 0x4f, 0xea, 0x12, 0x9e, 0xe0, 0xd3, 0x30, 0x5e, 0x37, 0x85, 0x00, 0x31, + 0x13, 0x9f, 0xcc, 0xe3, 0x9b, 0x90, 0x19, 0xe2, 0x63, 0x3f, 0x81, 0xc0, 0x49, 0xc6, 0xa8, 0xc2, + 0x23, 0x56, 0xac, 0x78, 0x9b, 0xbe, 0x78, 0xeb, 0x61, 0xe7, 0x8e, 0xe5, 0x5e, 0x18, 0x91, 0x26, + 0xa5, 0x8c, 0x4f, 0xf7, 0x35, 0x51, 0x16, 0x2b, 0x2e, 0xe8, 0x35, 0x18, 0x60, 0xef, 0xb3, 0xc2, + 0xe9, 0xa1, 0x7c, 0x8d, 0xb3, 0x19, 0xba, 0x2f, 0x5e, 0x90, 0xec, 0x6f, 0x88, 0x05, 0x07, 0x74, + 0x45, 0xbe, 0x7e, 0x0c, 0x57, 0xbc, 0x1b, 0x21, 0x61, 0xaf, 0x1f, 0x4b, 0xf3, 0x8f, 0xc7, 0x0f, + 0x1b, 0x39, 0x3c, 0x33, 0xcd, 0xa0, 0x51, 0x92, 0x4a, 0x51, 0xe2, 0xbf, 0xcc, 0x5e, 0x28, 0x02, + 0x0d, 0x65, 0x36, 0xcf, 0xcc, 0x70, 0x18, 0x77, 0xe7, 0x4d, 0x93, 0x05, 0x4e, 0xf2, 0xa4, 0x12, + 0x29, 0x5f, 0xf5, 0xe2, 0xb5, 0x48, 0xb7, 0xbd, 0x83, 0x5f, 0xc4, 0xd9, 0x69, 0xc4, 0x21, 0x58, + 0x94, 0x3f, 0x56, 0xf1, 0x60, 0xc6, 0x83, 0x89, 0xe4, 0x12, 0x7d, 0xa0, 0xe2, 0xc8, 0x1f, 0xf6, + 0xc1, 0x98, 0x39, 0xa5, 0xd0, 0x45, 0x28, 0x09, 0x26, 0x2a, 0x03, 0x88, 0x5a, 0x25, 0xab, 0x12, + 0x81, 0x63, 0x1a, 0x96, 0xf8, 0x85, 0x15, 0xd7, 0xdc, 0x83, 0xe3, 0xc4, 0x2f, 0x0a, 0x83, 0x35, + 0x2a, 0x7a, 0xb1, 0xda, 0xf0, 0xfd, 0x48, 0x1d, 0x48, 0x6a, 0xde, 0xcd, 0x33, 0x28, 0x16, 0x58, + 0x7a, 0x10, 0xed, 0x90, 0xc0, 0x23, 0x0d, 0x33, 0xf2, 0xb6, 0x3a, 0x88, 0xae, 0xea, 0x48, 0x6c, + 0xd2, 0xd2, 0xe3, 0xd4, 0x0f, 0xd9, 0x44, 0x16, 0xd7, 0xb7, 0xd8, 0xdd, 0xba, 0xca, 0x5f, 0x79, + 0x4b, 0x3c, 0xfa, 0x18, 0x3c, 0xa4, 0x02, 0x67, 0x61, 0x6e, 0xcd, 0x90, 0x35, 0x0e, 0x18, 0xda, + 0x96, 0x87, 0x16, 0xb2, 0xc9, 0x70, 0x5e, 0x79, 0xf4, 0x2a, 0x8c, 0x09, 0x11, 0x5f, 0x72, 0x1c, + 0x34, 0x3d, 0x8c, 0xae, 0x1a, 0x58, 0x9c, 0xa0, 0x96, 0xb1, 0xc3, 0x99, 0x94, 0x2d, 0x39, 0x0c, + 0xa5, 0x63, 0x87, 0xeb, 0x78, 0x9c, 0x2a, 0x81, 0xe6, 0x60, 0x9c, 0xcb, 0x60, 0xae, 0xb7, 0xc5, + 0xc7, 0x44, 0x3c, 0xe6, 0x52, 0x4b, 0xea, 0xba, 0x89, 0xc6, 0x49, 0x7a, 0xf4, 0x12, 0x8c, 0x38, + 0x41, 0x6d, 0xdb, 0x8d, 0x48, 0x2d, 0x6a, 0x07, 0xfc, 0x95, 0x97, 0xe6, 0xa2, 0x35, 0xa7, 0xe1, + 0xb0, 0x41, 0x69, 0xdf, 0x85, 0xa9, 0x8c, 0xf0, 0x0f, 0x74, 0xe2, 0x38, 0x2d, 0x57, 0x7e, 0x53, + 0xc2, 0xc3, 0x79, 0xae, 0xb2, 0x22, 0xbf, 0x46, 0xa3, 0xa2, 0xb3, 0x93, 0x85, 0x89, 0xd0, 0x92, + 0x95, 0xaa, 0xd9, 0xb9, 0x2c, 0x11, 
0x38, 0xa6, 0xb1, 0xff, 0x5b, 0x01, 0xc6, 0x33, 0x6c, 0x2b, + 0x2c, 0x61, 0x66, 0xe2, 0x92, 0x12, 0xe7, 0xc7, 0x34, 0x43, 0xd1, 0x17, 0x0e, 0x11, 0x8a, 0xbe, + 0xd8, 0x2d, 0x14, 0x7d, 0xdf, 0xdb, 0x09, 0x45, 0x6f, 0xf6, 0x58, 0x7f, 0x4f, 0x3d, 0x96, 0x11, + 0xbe, 0x7e, 0xe0, 0x90, 0xe1, 0xeb, 0x8d, 0x4e, 0x1f, 0xec, 0xa1, 0xd3, 0x7f, 0xb4, 0x00, 0x13, + 0x49, 0x57, 0xd2, 0x63, 0xd0, 0xdb, 0xbe, 0x66, 0xe8, 0x6d, 0x2f, 0xf4, 0xf2, 0xf8, 0x36, 0x57, + 0x87, 0x8b, 0x13, 0x3a, 0xdc, 0xf7, 0xf6, 0xc4, 0xad, 0xb3, 0x3e, 0xf7, 0xa7, 0x0a, 0x70, 0x32, + 0xf3, 0xf5, 0xef, 0x31, 0xf4, 0xcd, 0x75, 0xa3, 0x6f, 0x9e, 0xed, 0xf9, 0x61, 0x72, 0x6e, 0x07, + 0xdd, 0x4a, 0x74, 0xd0, 0xc5, 0xde, 0x59, 0x76, 0xee, 0xa5, 0xaf, 0x17, 0xe1, 0x6c, 0x66, 0xb9, + 0x58, 0xed, 0xb9, 0x6c, 0xa8, 0x3d, 0x9f, 0x4b, 0xa8, 0x3d, 0xed, 0xce, 0xa5, 0x8f, 0x46, 0x0f, + 0x2a, 0x1e, 0xe8, 0xb2, 0x30, 0x03, 0xf7, 0xa9, 0x03, 0x35, 0x1e, 0xe8, 0x2a, 0x46, 0xd8, 0xe4, + 0xfb, 0x9d, 0xa4, 0xfb, 0xfc, 0x97, 0x16, 0x9c, 0xce, 0x1c, 0x9b, 0x63, 0xd0, 0x75, 0xad, 0x99, + 0xba, 0xae, 0xa7, 0x7a, 0x9e, 0xad, 0x39, 0xca, 0xaf, 0x9f, 0xe9, 0xcf, 0xf9, 0x16, 0x76, 0x93, + 0xbf, 0x0e, 0xc3, 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xd5, 0xaf, 0xab, 0x40, 0xd8, 0xcf, 0xb2, 0x7b, + 0x56, 0x0c, 0x3e, 0xd8, 0x2f, 0xcf, 0x24, 0x59, 0xc4, 0x68, 0xac, 0x73, 0x40, 0x9f, 0x80, 0xa1, + 0x50, 0x9c, 0x9b, 0x62, 0xec, 0x9f, 0xef, 0xb1, 0x73, 0x9c, 0x0d, 0xd2, 0x30, 0x23, 0x2e, 0x29, + 0x4d, 0x85, 0x62, 0x69, 0x46, 0x67, 0x29, 0x1c, 0x69, 0x74, 0x96, 0xe7, 0x00, 0x76, 0xd5, 0x65, + 0x20, 0xa9, 0x7f, 0xd0, 0xae, 0x09, 0x1a, 0x15, 0xfa, 0x08, 0x4c, 0x84, 0x3c, 0x24, 0xe1, 0x42, + 0xc3, 0x09, 0xd9, 0x3b, 0x1a, 0x31, 0x0b, 0x59, 0x54, 0xa7, 0x6a, 0x02, 0x87, 0x53, 0xd4, 0x68, + 0x59, 0xd6, 0xca, 0xe2, 0x27, 0xf2, 0x89, 0x79, 0x3e, 0xae, 0x51, 0xa4, 0xeb, 0x3e, 0x91, 0xec, + 0x7e, 0xd6, 0xf1, 0x5a, 0x49, 0xf4, 0x09, 0x00, 0x3a, 0x7d, 0x84, 0x1e, 0x62, 0x30, 0x7f, 0xf3, + 0xa4, 0xbb, 0x4a, 0x3d, 0xd3, 0xb9, 0x99, 0xbd, 0xa9, 0x5d, 0x54, 0x4c, 0xb0, 0xc6, 0x10, 0x39, + 0x30, 0x1a, 0xff, 0x8b, 0xb3, 0xd9, 0x5e, 0xc8, 0xad, 0x21, 0xc9, 0x9c, 0xa9, 0xbc, 0x17, 0x75, + 0x16, 0xd8, 0xe4, 0x68, 0xff, 0xf8, 0x20, 0x3c, 0xdc, 0x61, 0x1b, 0x46, 0x73, 0xa6, 0xa9, 0xf7, + 0xe9, 0xe4, 0xfd, 0x7d, 0x26, 0xb3, 0xb0, 0x71, 0xa1, 0x4f, 0xcc, 0xf6, 0xc2, 0xdb, 0x9e, 0xed, + 0x3f, 0x6c, 0x69, 0x9a, 0x15, 0xee, 0x54, 0xfa, 0xe1, 0x43, 0x1e, 0x2f, 0x47, 0xa8, 0x6a, 0xd9, + 0xcc, 0xd0, 0x57, 0x3c, 0xd7, 0x73, 0x73, 0x7a, 0x57, 0x60, 0x7c, 0x35, 0x3b, 0x0e, 0x2f, 0x57, + 0x65, 0x5c, 0x3e, 0xec, 0xf7, 0x1f, 0x57, 0x4c, 0xde, 0x8f, 0xc9, 0xe8, 0x4b, 0xbc, 0x5e, 0xb1, + 0xd6, 0x5e, 0x8c, 0xc3, 0x29, 0xa9, 0xb3, 0xf4, 0xd1, 0xcc, 0xe6, 0xea, 0x44, 0xd8, 0x60, 0x75, + 0xbc, 0x57, 0xef, 0x6f, 0x51, 0x10, 0xe0, 0xdf, 0xb1, 0xe0, 0x4c, 0xc7, 0x88, 0x30, 0xdf, 0x86, + 0xb2, 0xa1, 0xfd, 0x39, 0x0b, 0xb2, 0x07, 0xdb, 0xf0, 0x28, 0xbb, 0x08, 0xa5, 0x5a, 0x22, 0xef, + 0x66, 0x1c, 0x1b, 0x41, 0xe5, 0xdc, 0x8c, 0x69, 0x0c, 0xc7, 0xb1, 0x42, 0x57, 0xc7, 0xb1, 0x5f, + 0xb7, 0x20, 0xb5, 0xbf, 0x1f, 0x83, 0xa0, 0xb1, 0x62, 0x0a, 0x1a, 0x8f, 0xf7, 0xd2, 0x9b, 0x39, + 0x32, 0xc6, 0x9f, 0x8c, 0xc3, 0xa9, 0x9c, 0x17, 0x79, 0xbb, 0x30, 0xb9, 0x55, 0x23, 0xe6, 0xe3, + 0xea, 0x4e, 0x41, 0x87, 0x3a, 0xbe, 0xc4, 0xe6, 0xe9, 0x4e, 0x53, 0x24, 0x38, 0x5d, 0x05, 0xfa, + 0x9c, 0x05, 0x27, 0x9c, 0xdb, 0xe1, 0x12, 0x15, 0x18, 0xdd, 0xda, 0x7c, 0xc3, 0xaf, 0xed, 0xd0, + 0xd3, 0x58, 0x2e, 0x84, 0x17, 0x32, 0x95, 0x78, 0xb7, 0xaa, 0x29, 0x7a, 0xa3, 0x7a, 0x96, 0xdc, + 0x3a, 0x8b, 0x0a, 0x67, 0xd6, 0x85, 0xb0, 0x48, 0xed, 0x41, 
0xaf, 0xa3, 0x1d, 0x9e, 0xff, 0x67, + 0x3d, 0x9d, 0xe4, 0x12, 0x90, 0xc4, 0x60, 0xc5, 0x07, 0x7d, 0x0a, 0x4a, 0x5b, 0xf2, 0xa5, 0x6f, + 0x86, 0x84, 0x15, 0x77, 0x64, 0xe7, 0xf7, 0xcf, 0xdc, 0x12, 0xaf, 0x88, 0x70, 0xcc, 0x14, 0xbd, + 0x0a, 0x45, 0x6f, 0x33, 0xec, 0x94, 0x1f, 0x3a, 0xe1, 0x72, 0xc9, 0x83, 0x6c, 0xac, 0x2d, 0x57, + 0x31, 0x2d, 0x88, 0xae, 0x40, 0x31, 0xd8, 0xa8, 0x0b, 0x0d, 0x74, 0xe6, 0x22, 0xc5, 0xf3, 0x8b, + 0x39, 0xad, 0x62, 0x9c, 0xf0, 0xfc, 0x22, 0xa6, 0x2c, 0x50, 0x05, 0xfa, 0xd9, 0x33, 0x36, 0x21, + 0xcf, 0x64, 0xde, 0xdc, 0x3a, 0x3c, 0x07, 0xe5, 0x91, 0x38, 0x18, 0x01, 0xe6, 0x8c, 0xd0, 0x3a, + 0x0c, 0xd4, 0x58, 0x2e, 0x61, 0x21, 0xc0, 0xbc, 0x2f, 0x53, 0xd7, 0xdc, 0x21, 0xc9, 0xb2, 0x50, + 0xbd, 0x32, 0x0a, 0x2c, 0x78, 0x31, 0xae, 0xa4, 0xb5, 0xbd, 0x19, 0x8a, 0x5c, 0xfb, 0xd9, 0x5c, + 0x3b, 0xe4, 0x0e, 0x17, 0x5c, 0x19, 0x05, 0x16, 0xbc, 0xd0, 0xcb, 0x50, 0xd8, 0xac, 0x89, 0x27, + 0x6a, 0x99, 0x4a, 0x67, 0x33, 0x4e, 0xca, 0xfc, 0xc0, 0xbd, 0xfd, 0x72, 0x61, 0x79, 0x01, 0x17, + 0x36, 0x6b, 0x68, 0x0d, 0x06, 0x37, 0x79, 0x64, 0x05, 0xa1, 0x57, 0x7e, 0x32, 0x3b, 0xe8, 0x43, + 0x2a, 0xf8, 0x02, 0x7f, 0xee, 0x24, 0x10, 0x58, 0x32, 0x61, 0x99, 0x26, 0x54, 0x84, 0x08, 0x11, + 0xa0, 0x6e, 0xf6, 0x70, 0x51, 0x3d, 0xb8, 0x7c, 0x19, 0xc7, 0x99, 0xc0, 0x1a, 0x47, 0x3a, 0xab, + 0x9d, 0xbb, 0xed, 0x80, 0x85, 0x1a, 0x17, 0x91, 0x8c, 0x32, 0x67, 0xf5, 0x9c, 0x24, 0xea, 0x34, + 0xab, 0x15, 0x11, 0x8e, 0x99, 0xa2, 0x1d, 0x18, 0xdd, 0x0d, 0x5b, 0xdb, 0x44, 0x2e, 0x69, 0x16, + 0xd8, 0x28, 0x47, 0x3e, 0xba, 0x29, 0x08, 0xdd, 0x20, 0x6a, 0x3b, 0x8d, 0xd4, 0x2e, 0xc4, 0x64, + 0xd9, 0x9b, 0x3a, 0x33, 0x6c, 0xf2, 0xa6, 0xdd, 0xff, 0x56, 0xdb, 0xdf, 0xd8, 0x8b, 0x88, 0x88, + 0x2b, 0x97, 0xd9, 0xfd, 0xaf, 0x73, 0x92, 0x74, 0xf7, 0x0b, 0x04, 0x96, 0x4c, 0xd0, 0x4d, 0xd1, + 0x3d, 0x6c, 0xf7, 0x9c, 0xc8, 0x8f, 0x30, 0x3b, 0x27, 0x89, 0x72, 0x3a, 0x85, 0xed, 0x96, 0x31, + 0x2b, 0xb6, 0x4b, 0xb6, 0xb6, 0xfd, 0xc8, 0xf7, 0x12, 0x3b, 0xf4, 0x64, 0xfe, 0x2e, 0x59, 0xc9, + 0xa0, 0x4f, 0xef, 0x92, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0x54, 0x87, 0xb1, 0x96, 0x1f, 0x44, 0xb7, + 0xfd, 0x40, 0xce, 0x2f, 0xd4, 0x41, 0x2f, 0x66, 0x50, 0x8a, 0x1a, 0x59, 0xc8, 0x46, 0x13, 0x83, + 0x13, 0x3c, 0xd1, 0x47, 0x61, 0x30, 0xac, 0x39, 0x0d, 0xb2, 0x72, 0x7d, 0x7a, 0x2a, 0xff, 0xf8, + 0xa9, 0x72, 0x92, 0x9c, 0xd9, 0xc5, 0x03, 0x63, 0x70, 0x12, 0x2c, 0xd9, 0xa1, 0x65, 0xe8, 0x67, + 0xe9, 0x16, 0x59, 0x10, 0xc4, 0x9c, 0x40, 0xb9, 0x29, 0x07, 0x78, 0xbe, 0x37, 0x31, 0x30, 0xe6, + 0xc5, 0xe9, 0x1a, 0x10, 0xd7, 0x43, 0x3f, 0x9c, 0x3e, 0x99, 0xbf, 0x06, 0xc4, 0xad, 0xf2, 0x7a, + 0xb5, 0xd3, 0x1a, 0x50, 0x44, 0x38, 0x66, 0x4a, 0x77, 0x66, 0xba, 0x9b, 0x9e, 0xea, 0xe0, 0xb9, + 0x95, 0xbb, 0x97, 0xb2, 0x9d, 0x99, 0xee, 0xa4, 0x94, 0x85, 0xfd, 0x07, 0x83, 0x69, 0x99, 0x85, + 0x29, 0x14, 0xfe, 0x82, 0x95, 0xb2, 0x35, 0xbf, 0xbf, 0x57, 0xfd, 0xe6, 0x11, 0x5e, 0x85, 0x3e, + 0x67, 0xc1, 0xa9, 0x56, 0xe6, 0x87, 0x08, 0x01, 0xa0, 0x37, 0x35, 0x29, 0xff, 0x74, 0x15, 0x30, + 0x33, 0x1b, 0x8f, 0x73, 0x6a, 0x4a, 0x5e, 0x37, 0x8b, 0x6f, 0xfb, 0xba, 0xb9, 0x0a, 0x43, 0x35, + 0x7e, 0x15, 0xe9, 0x98, 0x5b, 0x3f, 0x79, 0xf7, 0x66, 0xa2, 0x84, 0xb8, 0xc3, 0x6c, 0x62, 0xc5, + 0x02, 0xfd, 0x88, 0x05, 0x67, 0x92, 0x4d, 0xc7, 0x84, 0xa1, 0x45, 0x94, 0x4d, 0xae, 0xcb, 0x58, + 0x16, 0xdf, 0x9f, 0x92, 0xff, 0x0d, 0xe2, 0x83, 0x6e, 0x04, 0xb8, 0x73, 0x65, 0x68, 0x31, 0x43, + 0x99, 0x32, 0x60, 0x1a, 0x90, 0x7a, 0x50, 0xa8, 0xbc, 0x00, 0x23, 0x4d, 0xbf, 0xed, 0x45, 0xc2, + 0xd1, 0x4b, 0x38, 0x9d, 0x30, 0x67, 0x8b, 0x55, 0x0d, 0x8e, 0x0d, 0xaa, 0x84, 0x1a, 
0x66, 0xe8, + 0xbe, 0xd5, 0x30, 0x6f, 0xc2, 0x88, 0xa7, 0x79, 0x26, 0x0b, 0x79, 0xe0, 0x7c, 0x7e, 0x84, 0x5c, + 0xdd, 0x8f, 0x99, 0xb7, 0x52, 0x87, 0x60, 0x83, 0xdb, 0xf1, 0x7a, 0x80, 0x7d, 0xc9, 0xca, 0x10, + 0xea, 0xb9, 0x2a, 0xe6, 0x43, 0xa6, 0x2a, 0xe6, 0x7c, 0x52, 0x15, 0x93, 0x32, 0x1e, 0x18, 0x5a, + 0x98, 0xde, 0xb3, 0x3b, 0xf5, 0x1a, 0x65, 0xd3, 0x6e, 0xc0, 0xb9, 0x6e, 0xc7, 0x12, 0xf3, 0xf8, + 0xab, 0x2b, 0x53, 0x71, 0xec, 0xf1, 0x57, 0x5f, 0x59, 0xc4, 0x0c, 0xd3, 0x6b, 0xfc, 0x26, 0xfb, + 0xbf, 0x58, 0x50, 0xac, 0xf8, 0xf5, 0x63, 0xb8, 0xf0, 0x7e, 0xd8, 0xb8, 0xf0, 0x3e, 0x9c, 0x7d, + 0x20, 0xd6, 0x73, 0x4d, 0x1f, 0x4b, 0x09, 0xd3, 0xc7, 0x99, 0x3c, 0x06, 0x9d, 0x0d, 0x1d, 0x3f, + 0x5d, 0x84, 0xe1, 0x8a, 0x5f, 0x57, 0xee, 0xf6, 0xff, 0xf4, 0x7e, 0xdc, 0xed, 0x73, 0x73, 0x65, + 0x68, 0x9c, 0x99, 0xa3, 0xa0, 0x7c, 0x69, 0xfc, 0x6d, 0xe6, 0x75, 0x7f, 0x8b, 0xb8, 0x5b, 0xdb, + 0x11, 0xa9, 0x27, 0x3f, 0xe7, 0xf8, 0xbc, 0xee, 0xff, 0xa0, 0x00, 0xe3, 0x89, 0xda, 0x51, 0x03, + 0x46, 0x1b, 0xba, 0x62, 0x5d, 0xcc, 0xd3, 0xfb, 0xd2, 0xc9, 0x0b, 0xaf, 0x65, 0x0d, 0x84, 0x4d, + 0xe6, 0x68, 0x16, 0x40, 0x59, 0x9a, 0xa5, 0x7a, 0x95, 0x49, 0xfd, 0xca, 0x14, 0x1d, 0x62, 0x8d, + 0x02, 0xbd, 0x08, 0xc3, 0x91, 0xdf, 0xf2, 0x1b, 0xfe, 0xd6, 0xde, 0x55, 0x22, 0x43, 0x7b, 0x29, + 0x5f, 0xc4, 0xf5, 0x18, 0x85, 0x75, 0x3a, 0x74, 0x07, 0x26, 0x15, 0x93, 0xea, 0x11, 0x18, 0x1b, + 0x98, 0x56, 0x61, 0x2d, 0xc9, 0x11, 0xa7, 0x2b, 0xb1, 0x7f, 0xae, 0xc8, 0xbb, 0xd8, 0x8b, 0xdc, + 0x77, 0x57, 0xc3, 0x3b, 0x7b, 0x35, 0x7c, 0xdd, 0x82, 0x09, 0x5a, 0x3b, 0x73, 0xb4, 0x92, 0xc7, + 0xbc, 0x8a, 0xc9, 0x6d, 0x75, 0x88, 0xc9, 0x7d, 0x9e, 0xee, 0x9a, 0x75, 0xbf, 0x1d, 0x09, 0xdd, + 0x9d, 0xb6, 0x2d, 0x52, 0x28, 0x16, 0x58, 0x41, 0x47, 0x82, 0x40, 0x3c, 0x0e, 0xd5, 0xe9, 0x48, + 0x10, 0x60, 0x81, 0x95, 0x21, 0xbb, 0xfb, 0xb2, 0x43, 0x76, 0xf3, 0xc8, 0xab, 0xc2, 0x25, 0x47, + 0x08, 0x5c, 0x5a, 0xe4, 0x55, 0xe9, 0xab, 0x13, 0xd3, 0xd8, 0x5f, 0x2d, 0xc2, 0x48, 0xc5, 0xaf, + 0xc7, 0x56, 0xe6, 0x17, 0x0c, 0x2b, 0xf3, 0xb9, 0x84, 0x95, 0x79, 0x42, 0xa7, 0x7d, 0xd7, 0xa6, + 0xfc, 0xad, 0xb2, 0x29, 0xff, 0x9a, 0xc5, 0x46, 0x6d, 0x71, 0xad, 0xca, 0xfd, 0xf6, 0xd0, 0x25, + 0x18, 0x66, 0x1b, 0x0c, 0x7b, 0x8d, 0x2c, 0x4d, 0xaf, 0x2c, 0xdf, 0xd5, 0x5a, 0x0c, 0xc6, 0x3a, + 0x0d, 0xba, 0x00, 0x43, 0x21, 0x71, 0x82, 0xda, 0xb6, 0xda, 0x5d, 0x85, 0x9d, 0x94, 0xc3, 0xb0, + 0xc2, 0xa2, 0xd7, 0xe3, 0xa0, 0x9f, 0xc5, 0xfc, 0xd7, 0x8d, 0x7a, 0x7b, 0xf8, 0x12, 0xc9, 0x8f, + 0xf4, 0x69, 0xdf, 0x02, 0x94, 0xa6, 0xef, 0x21, 0x2c, 0x5d, 0xd9, 0x0c, 0x4b, 0x57, 0x4a, 0x85, + 0xa4, 0xfb, 0x33, 0x0b, 0xc6, 0x2a, 0x7e, 0x9d, 0x2e, 0xdd, 0xef, 0xa4, 0x75, 0xaa, 0x47, 0x3c, + 0x1e, 0xe8, 0x10, 0xf1, 0xf8, 0x31, 0xe8, 0xaf, 0xf8, 0xf5, 0x95, 0x4a, 0xa7, 0xd0, 0x02, 0xf6, + 0xdf, 0xb4, 0x60, 0xb0, 0xe2, 0xd7, 0x8f, 0xc1, 0x2c, 0xf0, 0x21, 0xd3, 0x2c, 0xf0, 0x50, 0xce, + 0xbc, 0xc9, 0xb1, 0x04, 0xfc, 0x8d, 0x3e, 0x18, 0xa5, 0xed, 0xf4, 0xb7, 0xe4, 0x50, 0x1a, 0xdd, + 0x66, 0xf5, 0xd0, 0x6d, 0x54, 0x0a, 0xf7, 0x1b, 0x0d, 0xff, 0x76, 0x72, 0x58, 0x97, 0x19, 0x14, + 0x0b, 0x2c, 0x7a, 0x06, 0x86, 0x5a, 0x01, 0xd9, 0x75, 0x7d, 0x21, 0xde, 0x6a, 0x46, 0x96, 0x8a, + 0x80, 0x63, 0x45, 0x41, 0xaf, 0x85, 0xa1, 0xeb, 0xd1, 0xa3, 0xbc, 0xe6, 0x7b, 0x75, 0xae, 0x39, + 0x2f, 0x8a, 0xb4, 0x1c, 0x1a, 0x1c, 0x1b, 0x54, 0xe8, 0x16, 0x94, 0xd8, 0x7f, 0xb6, 0xed, 0x1c, + 0x3e, 0x7b, 0xaf, 0xc8, 0x2a, 0x28, 0x18, 0xe0, 0x98, 0x17, 0x7a, 0x0e, 0x20, 0x92, 0xa1, 0xed, + 0x43, 0x11, 0x68, 0x4d, 0x5d, 0x05, 0x54, 0xd0, 0xfb, 0x10, 0x6b, 0x54, 0xe8, 0x69, 0x28, 0x45, + 0x8e, 0xdb, 
0xb8, 0xe6, 0x7a, 0x24, 0x64, 0x1a, 0xf1, 0xa2, 0x4c, 0xee, 0x27, 0x80, 0x38, 0xc6, + 0x53, 0x51, 0x8c, 0x05, 0xe1, 0xe0, 0xb9, 0xcb, 0x87, 0x18, 0x35, 0x13, 0xc5, 0xae, 0x29, 0x28, + 0xd6, 0x28, 0xd0, 0x36, 0x3c, 0xe2, 0x7a, 0x2c, 0x85, 0x05, 0xa9, 0xee, 0xb8, 0xad, 0xf5, 0x6b, + 0xd5, 0x9b, 0x24, 0x70, 0x37, 0xf7, 0xe6, 0x9d, 0xda, 0x0e, 0xf1, 0x64, 0x5e, 0x56, 0x99, 0xae, + 0xfb, 0x91, 0x95, 0x0e, 0xb4, 0xb8, 0x23, 0x27, 0xfb, 0x79, 0x36, 0xdf, 0xaf, 0x57, 0xd1, 0x7b, + 0x8d, 0xad, 0xe3, 0x94, 0xbe, 0x75, 0x1c, 0xec, 0x97, 0x07, 0xae, 0x57, 0xb5, 0x18, 0x12, 0x2f, + 0xc1, 0xc9, 0x8a, 0x5f, 0xaf, 0xf8, 0x41, 0xb4, 0xec, 0x07, 0xb7, 0x9d, 0xa0, 0x2e, 0xa7, 0x57, + 0x59, 0x46, 0xd1, 0xa0, 0xfb, 0x67, 0x3f, 0xdf, 0x5d, 0x8c, 0x08, 0x19, 0xcf, 0x33, 0x89, 0xed, + 0x90, 0x6f, 0xbf, 0x6a, 0x4c, 0x76, 0x50, 0x49, 0x60, 0x2e, 0x3b, 0x11, 0x41, 0xd7, 0x59, 0xe6, + 0xf5, 0xf8, 0x18, 0x15, 0xc5, 0x9f, 0xd2, 0x32, 0xaf, 0xc7, 0xc8, 0xcc, 0x73, 0xd7, 0x2c, 0x6f, + 0x7f, 0x56, 0x54, 0xc2, 0xef, 0xe0, 0xdc, 0xbf, 0xae, 0x97, 0xd4, 0xc5, 0x32, 0x4b, 0x44, 0x21, + 0x3f, 0xbd, 0x00, 0xb7, 0x7a, 0x76, 0xcc, 0x12, 0x61, 0xbf, 0x08, 0x93, 0xf4, 0xea, 0xa7, 0xe4, + 0x28, 0xf6, 0x91, 0xdd, 0xa3, 0x79, 0xfc, 0xd7, 0x7e, 0x76, 0x0e, 0x24, 0xd2, 0x9f, 0xa0, 0x4f, + 0xc2, 0x58, 0x48, 0xae, 0xb9, 0x5e, 0xfb, 0x8e, 0x54, 0xbc, 0x74, 0x78, 0x73, 0x58, 0x5d, 0xd2, + 0x29, 0xb9, 0xfa, 0xd6, 0x84, 0xe1, 0x04, 0x37, 0xd4, 0x84, 0xb1, 0xdb, 0xae, 0x57, 0xf7, 0x6f, + 0x87, 0x92, 0xff, 0x50, 0xbe, 0x16, 0xf7, 0x16, 0xa7, 0x4c, 0xb4, 0xd1, 0xa8, 0xee, 0x96, 0xc1, + 0x0c, 0x27, 0x98, 0xd3, 0xb5, 0x16, 0xb4, 0xbd, 0xb9, 0xf0, 0x46, 0x48, 0x02, 0x91, 0xf9, 0x9f, + 0xa7, 0xe5, 0x95, 0x40, 0x1c, 0xe3, 0xe9, 0x5a, 0x63, 0x7f, 0x2e, 0x07, 0x7e, 0x9b, 0xe7, 0xda, + 0x10, 0x6b, 0x0d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x5e, 0xc4, 0xfe, 0xad, 0xf9, 0x1e, 0xf6, 0xfd, + 0x48, 0xee, 0x5e, 0xcc, 0x13, 0x41, 0x83, 0x63, 0x83, 0x0a, 0x2d, 0x03, 0x0a, 0xdb, 0xad, 0x56, + 0x83, 0x39, 0x33, 0x39, 0x0d, 0xc6, 0x8a, 0x7b, 0x79, 0x14, 0x79, 0xac, 0xe0, 0x6a, 0x0a, 0x8b, + 0x33, 0x4a, 0xd0, 0x63, 0x69, 0x53, 0x34, 0xb5, 0x9f, 0x35, 0x95, 0x5b, 0x7c, 0xaa, 0xbc, 0x9d, + 0x12, 0x87, 0x96, 0x60, 0x30, 0xdc, 0x0b, 0x6b, 0x91, 0x08, 0xed, 0x98, 0x93, 0x46, 0xab, 0xca, + 0x48, 0xb4, 0x2c, 0x8e, 0xbc, 0x08, 0x96, 0x65, 0x51, 0x0d, 0xa6, 0x04, 0xc7, 0x85, 0x6d, 0xc7, + 0x53, 0xf9, 0x82, 0xb8, 0x4f, 0xf7, 0xa5, 0x7b, 0xfb, 0xe5, 0x29, 0x51, 0xb3, 0x8e, 0x3e, 0xd8, + 0x2f, 0x9f, 0xaa, 0xf8, 0xf5, 0x0c, 0x0c, 0xce, 0xe2, 0xc6, 0x27, 0x5f, 0xad, 0xe6, 0x37, 0x5b, + 0x95, 0xc0, 0xdf, 0x74, 0x1b, 0xa4, 0x93, 0xd5, 0xac, 0x6a, 0x50, 0x8a, 0xc9, 0x67, 0xc0, 0x70, + 0x82, 0x9b, 0xfd, 0x59, 0x26, 0xba, 0xb1, 0x64, 0xf1, 0x51, 0x3b, 0x20, 0xa8, 0x09, 0xa3, 0x2d, + 0xb6, 0xb8, 0x45, 0x06, 0x0c, 0x31, 0xd7, 0x5f, 0xe8, 0x51, 0xfb, 0x73, 0x9b, 0xe5, 0xf5, 0x32, + 0x3c, 0xa3, 0x2a, 0x3a, 0x3b, 0x6c, 0x72, 0xb7, 0xff, 0xf5, 0x69, 0x76, 0xf8, 0x57, 0xb9, 0x4a, + 0x67, 0x50, 0x3c, 0x21, 0x11, 0xb7, 0xc8, 0x99, 0x7c, 0xdd, 0x62, 0x3c, 0x2c, 0xe2, 0x19, 0x0a, + 0x96, 0x65, 0xd1, 0x27, 0x60, 0x8c, 0x5e, 0xca, 0xd4, 0x01, 0x1c, 0x4e, 0x9f, 0xc8, 0x0f, 0xf5, + 0xa1, 0xa8, 0xf4, 0xec, 0x38, 0x7a, 0x61, 0x9c, 0x60, 0x86, 0x5e, 0x67, 0x9e, 0x48, 0x92, 0x75, + 0xa1, 0x17, 0xd6, 0xba, 0xd3, 0x91, 0x64, 0xab, 0x31, 0x41, 0x6d, 0x98, 0x4a, 0x27, 0xec, 0x0b, + 0xa7, 0xed, 0x7c, 0xe9, 0x36, 0x9d, 0x73, 0x2f, 0x4e, 0x63, 0x92, 0xc6, 0x85, 0x38, 0x8b, 0x3f, + 0xba, 0x06, 0xa3, 0x22, 0x63, 0xba, 0x98, 0xb9, 0x45, 0x43, 0xe5, 0x39, 0x8a, 0x75, 0xe4, 0x41, + 0x12, 0x80, 0xcd, 0xc2, 0x68, 0x0b, 
0xce, 0x68, 0x49, 0xae, 0x2e, 0x07, 0x0e, 0xf3, 0x5b, 0x70, + 0xd9, 0x76, 0xaa, 0x89, 0x25, 0x8f, 0xde, 0xdb, 0x2f, 0x9f, 0x59, 0xef, 0x44, 0x88, 0x3b, 0xf3, + 0x41, 0xd7, 0xe1, 0x24, 0x7f, 0xa8, 0xbe, 0x48, 0x9c, 0x7a, 0xc3, 0xf5, 0x94, 0xdc, 0xc3, 0x97, + 0xfc, 0xe9, 0x7b, 0xfb, 0xe5, 0x93, 0x73, 0x59, 0x04, 0x38, 0xbb, 0x1c, 0xfa, 0x10, 0x94, 0xea, + 0x5e, 0x28, 0xfa, 0x60, 0xc0, 0xc8, 0x23, 0x56, 0x5a, 0x5c, 0xab, 0xaa, 0xef, 0x8f, 0xff, 0xe0, + 0xb8, 0x00, 0xda, 0xe2, 0x6a, 0x71, 0xa5, 0xac, 0x19, 0x4c, 0x05, 0xea, 0x4a, 0xea, 0x33, 0x8d, + 0xa7, 0xaa, 0xdc, 0x1e, 0xa4, 0x5e, 0x70, 0x18, 0xaf, 0x58, 0x0d, 0xc6, 0xe8, 0x35, 0x40, 0x22, + 0x5e, 0xfd, 0x5c, 0x8d, 0xa5, 0x57, 0x61, 0x56, 0x84, 0x21, 0xf3, 0xf1, 0x64, 0x35, 0x45, 0x81, + 0x33, 0x4a, 0xa1, 0x2b, 0x74, 0x57, 0xd1, 0xa1, 0x62, 0xd7, 0x52, 0xa9, 0x25, 0x17, 0x49, 0x2b, + 0x20, 0xcc, 0x0f, 0xcb, 0xe4, 0x88, 0x13, 0xe5, 0x50, 0x1d, 0x1e, 0x71, 0xda, 0x91, 0xcf, 0x2c, + 0x0e, 0x26, 0xe9, 0xba, 0xbf, 0x43, 0x3c, 0x66, 0xec, 0x1b, 0x9a, 0x3f, 0x47, 0x05, 0xab, 0xb9, + 0x0e, 0x74, 0xb8, 0x23, 0x17, 0x2a, 0x10, 0xab, 0x5c, 0xd2, 0x60, 0x86, 0x1f, 0xcb, 0xc8, 0x27, + 0xfd, 0x22, 0x0c, 0x6f, 0xfb, 0x61, 0xb4, 0x46, 0xa2, 0xdb, 0x7e, 0xb0, 0x23, 0xc2, 0xe8, 0xc6, + 0x41, 0xc9, 0x63, 0x14, 0xd6, 0xe9, 0xe8, 0x8d, 0x97, 0xb9, 0xa2, 0xac, 0x2c, 0x32, 0x2f, 0x80, + 0xa1, 0x78, 0x8f, 0xb9, 0xc2, 0xc1, 0x58, 0xe2, 0x25, 0xe9, 0x4a, 0x65, 0x81, 0x59, 0xf4, 0x13, + 0xa4, 0x2b, 0x95, 0x05, 0x2c, 0xf1, 0x74, 0xba, 0x86, 0xdb, 0x4e, 0x40, 0x2a, 0x81, 0x5f, 0x23, + 0xa1, 0x16, 0x0a, 0xff, 0x61, 0x1e, 0x24, 0x98, 0x4e, 0xd7, 0x6a, 0x16, 0x01, 0xce, 0x2e, 0x87, + 0x48, 0x3a, 0xc1, 0xdb, 0x58, 0xbe, 0x29, 0x26, 0x2d, 0xcf, 0xf4, 0x98, 0xe3, 0xcd, 0x83, 0x09, + 0x95, 0x5a, 0x8e, 0x87, 0x05, 0x0e, 0xa7, 0xc7, 0xd9, 0xdc, 0xee, 0x3d, 0xa6, 0xb0, 0x32, 0x6e, + 0xad, 0x24, 0x38, 0xe1, 0x14, 0x6f, 0x23, 0xc2, 0xdc, 0x44, 0xd7, 0x08, 0x73, 0x17, 0xa1, 0x14, + 0xb6, 0x37, 0xea, 0x7e, 0xd3, 0x71, 0x3d, 0x66, 0xd1, 0xd7, 0xae, 0x5e, 0x55, 0x89, 0xc0, 0x31, + 0x0d, 0x5a, 0x86, 0x21, 0x47, 0x5a, 0xae, 0x50, 0x7e, 0x4c, 0x21, 0x65, 0xaf, 0xe2, 0x61, 0x36, + 0xa4, 0xad, 0x4a, 0x95, 0x45, 0xaf, 0xc0, 0xa8, 0x78, 0x68, 0x2d, 0x52, 0xa7, 0x4e, 0x99, 0xaf, + 0xe1, 0xaa, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x06, 0x0c, 0x47, 0x7e, 0x83, 0x3d, 0xe9, 0xa2, 0x62, + 0xde, 0xa9, 0xfc, 0xe8, 0x78, 0xeb, 0x8a, 0x4c, 0x57, 0x1a, 0xab, 0xa2, 0x58, 0xe7, 0x83, 0xd6, + 0xf9, 0x7c, 0x67, 0x81, 0xef, 0x49, 0x28, 0x72, 0x6f, 0x9e, 0xc9, 0x73, 0xc7, 0x62, 0x64, 0xe6, + 0x72, 0x10, 0x25, 0xb1, 0xce, 0x06, 0x5d, 0x86, 0xc9, 0x56, 0xe0, 0xfa, 0x6c, 0x4e, 0x28, 0xa3, + 0xe5, 0xb4, 0x99, 0xe6, 0xaa, 0x92, 0x24, 0xc0, 0xe9, 0x32, 0xec, 0x9d, 0xbc, 0x00, 0x4e, 0x9f, + 0xe6, 0xa9, 0x3a, 0xf8, 0x4d, 0x96, 0xc3, 0xb0, 0xc2, 0xa2, 0x55, 0xb6, 0x13, 0x73, 0x25, 0xcc, + 0xf4, 0x4c, 0x7e, 0x18, 0x23, 0x5d, 0x59, 0xc3, 0x85, 0x57, 0xf5, 0x17, 0xc7, 0x1c, 0x50, 0x5d, + 0xcb, 0x90, 0x49, 0xaf, 0x00, 0xe1, 0xf4, 0x23, 0x1d, 0xfc, 0x01, 0x13, 0x97, 0xa2, 0x58, 0x20, + 0x30, 0xc0, 0x21, 0x4e, 0xf0, 0x44, 0x1f, 0x81, 0x09, 0x11, 0x7c, 0x31, 0xee, 0xa6, 0x33, 0xb1, + 0xa3, 0x3c, 0x4e, 0xe0, 0x70, 0x8a, 0x9a, 0xa7, 0xca, 0x70, 0x36, 0x1a, 0x44, 0x6c, 0x7d, 0xd7, + 0x5c, 0x6f, 0x27, 0x9c, 0x3e, 0xcb, 0xf6, 0x07, 0x91, 0x2a, 0x23, 0x89, 0xc5, 0x19, 0x25, 0xd0, + 0x3a, 0x4c, 0xb4, 0x02, 0x42, 0x9a, 0x4c, 0xd0, 0x17, 0xe7, 0x59, 0x99, 0x87, 0x89, 0xa0, 0x2d, + 0xa9, 0x24, 0x70, 0x07, 0x19, 0x30, 0x9c, 0xe2, 0x80, 0x6e, 0xc3, 0x90, 0xbf, 0x4b, 0x82, 0x6d, + 0xe2, 0xd4, 0xa7, 0xcf, 0x75, 0x78, 0xb8, 0x21, 0x0e, 0xb7, 
0xeb, 0x82, 0x36, 0xe1, 0xe8, 0x20, + 0xc1, 0xdd, 0x1d, 0x1d, 0x64, 0x65, 0xe8, 0x2f, 0x5a, 0x70, 0x5a, 0xda, 0x46, 0xaa, 0x2d, 0xda, + 0xeb, 0x0b, 0xbe, 0x17, 0x46, 0x01, 0x0f, 0x6c, 0xf0, 0x68, 0xfe, 0x63, 0xff, 0xf5, 0x9c, 0x42, + 0x4a, 0x0f, 0x7c, 0x3a, 0x8f, 0x22, 0xc4, 0xf9, 0x35, 0xa2, 0x05, 0x98, 0x0c, 0x49, 0x24, 0x37, + 0xa3, 0xb9, 0x70, 0xf9, 0xf5, 0xc5, 0xb5, 0xe9, 0xc7, 0x78, 0x54, 0x06, 0xba, 0x18, 0xaa, 0x49, + 0x24, 0x4e, 0xd3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xe9, 0xc7, 0x3b, 0x24, 0x55, 0xf5, 0xeb, 0xd7, + 0xab, 0xdc, 0xe1, 0xed, 0x7a, 0x15, 0x17, 0xfc, 0x50, 0xa6, 0xab, 0xa0, 0xf7, 0xb1, 0x70, 0xfa, + 0x09, 0xae, 0x35, 0x94, 0xe9, 0x2a, 0x18, 0x10, 0xc7, 0x78, 0xb4, 0x0d, 0xe3, 0xa1, 0x71, 0xef, + 0x0d, 0xa7, 0xcf, 0xb3, 0x9e, 0x7a, 0x22, 0x6f, 0xd0, 0x0c, 0x6a, 0x2d, 0xda, 0xbc, 0xc9, 0x05, + 0x27, 0xd9, 0xf2, 0xd5, 0xa5, 0x5d, 0xf0, 0xc3, 0xe9, 0x27, 0xbb, 0xac, 0x2e, 0x8d, 0x58, 0x5f, + 0x5d, 0x3a, 0x0f, 0x9c, 0xe0, 0x39, 0xf3, 0x5d, 0x30, 0x99, 0x12, 0x97, 0x0e, 0x93, 0x89, 0x69, + 0x66, 0x07, 0x46, 0x8d, 0x29, 0xf9, 0x40, 0x1d, 0x0b, 0xbe, 0x67, 0x08, 0x4a, 0xca, 0xe8, 0x8c, + 0x2e, 0x9a, 0xbe, 0x04, 0xa7, 0x93, 0xbe, 0x04, 0x43, 0x15, 0xbf, 0x6e, 0xb8, 0x0f, 0xac, 0x67, + 0xc4, 0xee, 0xcb, 0xdb, 0x00, 0x7b, 0x7f, 0xd3, 0xa0, 0x69, 0xf2, 0x8b, 0x3d, 0x3b, 0x25, 0xf4, + 0x75, 0x34, 0x0e, 0x5c, 0x86, 0x49, 0xcf, 0x67, 0x32, 0x3a, 0xa9, 0x4b, 0x01, 0x8c, 0xc9, 0x59, + 0x25, 0x3d, 0x18, 0x4e, 0x82, 0x00, 0xa7, 0xcb, 0xd0, 0x0a, 0xb9, 0xa0, 0x94, 0xb4, 0x46, 0x70, + 0x39, 0x0a, 0x0b, 0x2c, 0x7a, 0x0c, 0xfa, 0x5b, 0x7e, 0x7d, 0xa5, 0x22, 0xe4, 0x73, 0x2d, 0x62, + 0x6c, 0x7d, 0xa5, 0x82, 0x39, 0x0e, 0xcd, 0xc1, 0x00, 0xfb, 0x11, 0x4e, 0x8f, 0xe4, 0x47, 0x3d, + 0x61, 0x25, 0xb4, 0x3c, 0x57, 0xac, 0x00, 0x16, 0x05, 0x99, 0x56, 0x94, 0x5e, 0x6a, 0x98, 0x56, + 0x74, 0xf0, 0x3e, 0xb5, 0xa2, 0x92, 0x01, 0x8e, 0x79, 0xa1, 0x3b, 0x70, 0xd2, 0xb8, 0x48, 0xf2, + 0x29, 0x42, 0x42, 0x11, 0x79, 0xe1, 0xb1, 0x8e, 0x37, 0x48, 0xe1, 0xc4, 0x70, 0x46, 0x34, 0xfa, + 0xe4, 0x4a, 0x16, 0x27, 0x9c, 0x5d, 0x01, 0x6a, 0xc0, 0x64, 0x2d, 0x55, 0xeb, 0x50, 0xef, 0xb5, + 0xaa, 0x01, 0x4d, 0xd7, 0x98, 0x66, 0x8c, 0x5e, 0x81, 0xa1, 0xb7, 0xfc, 0x90, 0x9d, 0x6d, 0xe2, + 0x4e, 0x21, 0x9f, 0xed, 0x0f, 0xbd, 0x7e, 0xbd, 0xca, 0xe0, 0x07, 0xfb, 0xe5, 0xe1, 0x8a, 0x5f, + 0x97, 0x7f, 0xb1, 0x2a, 0x80, 0x7e, 0xc0, 0x82, 0x99, 0xf4, 0x4d, 0x55, 0x35, 0x7a, 0xb4, 0xf7, + 0x46, 0xdb, 0xa2, 0xd2, 0x99, 0xa5, 0x5c, 0x76, 0xb8, 0x43, 0x55, 0xe8, 0x83, 0x74, 0x21, 0x84, + 0xee, 0x5d, 0x22, 0x92, 0x84, 0x3e, 0x1a, 0x2f, 0x04, 0x0a, 0x3d, 0xd8, 0x2f, 0x8f, 0xf3, 0x2d, + 0x2d, 0x7e, 0x37, 0x23, 0x0a, 0xd8, 0xbf, 0x6c, 0x31, 0xb5, 0xac, 0x80, 0x92, 0xb0, 0xdd, 0x38, + 0x8e, 0xcc, 0xc0, 0x4b, 0x86, 0xc9, 0xf3, 0xbe, 0xfd, 0x61, 0xfe, 0x89, 0xc5, 0xfc, 0x61, 0x8e, + 0xf1, 0xe1, 0xcb, 0xeb, 0x30, 0x14, 0xc9, 0x8c, 0xcd, 0x1d, 0x92, 0x19, 0x6b, 0x8d, 0x62, 0x3e, + 0x41, 0xea, 0x72, 0xa0, 0x92, 0x33, 0x2b, 0x36, 0xf6, 0x3f, 0xe4, 0x23, 0x20, 0x31, 0xc7, 0x60, + 0x59, 0x5a, 0x34, 0x2d, 0x4b, 0xe5, 0x2e, 0x5f, 0x90, 0x63, 0x61, 0xfa, 0x07, 0x66, 0xbb, 0x99, + 0x52, 0xec, 0x9d, 0xee, 0x88, 0x65, 0x7f, 0xde, 0x02, 0x88, 0x63, 0x79, 0xf7, 0x90, 0x93, 0xef, + 0x25, 0x7a, 0x1d, 0xf0, 0x23, 0xbf, 0xe6, 0x37, 0x84, 0xdd, 0xf4, 0x91, 0xd8, 0xb8, 0xc5, 0xe1, + 0x07, 0xda, 0x6f, 0xac, 0xa8, 0x51, 0x59, 0x46, 0x0e, 0x2c, 0xc6, 0xe6, 0x56, 0x23, 0x6a, 0xe0, + 0x17, 0x2d, 0x38, 0x91, 0xe5, 0x45, 0x4d, 0x2f, 0x97, 0x5c, 0x3d, 0xa8, 0x9c, 0xe4, 0xd4, 0x68, + 0xde, 0x14, 0x70, 0xac, 0x28, 0x7a, 0x4e, 0x76, 0x78, 0xb8, 0x20, 0xda, 0xd7, 0x61, 
0xb4, 0x12, + 0x10, 0xed, 0x5c, 0x7e, 0x95, 0x47, 0xa3, 0xe0, 0xed, 0x79, 0xe6, 0xd0, 0x91, 0x28, 0xec, 0x2f, + 0x17, 0xe0, 0x04, 0xf7, 0x35, 0x99, 0xdb, 0xf5, 0xdd, 0x7a, 0xc5, 0xaf, 0x8b, 0xb7, 0x72, 0x6f, + 0xc0, 0x48, 0x4b, 0xd3, 0xe9, 0x76, 0x0a, 0x08, 0xab, 0xeb, 0x7e, 0x63, 0x2d, 0x94, 0x0e, 0xc5, + 0x06, 0x2f, 0x54, 0x87, 0x11, 0xb2, 0xeb, 0xd6, 0x94, 0xc3, 0x42, 0xe1, 0xd0, 0x67, 0xa4, 0xaa, + 0x65, 0x49, 0xe3, 0x83, 0x0d, 0xae, 0x0f, 0x20, 0x05, 0xb9, 0xfd, 0x63, 0x16, 0x3c, 0x94, 0x13, + 0x3e, 0x96, 0x56, 0x77, 0x9b, 0x79, 0xf5, 0x88, 0x69, 0xab, 0xaa, 0xe3, 0xbe, 0x3e, 0x58, 0x60, + 0xd1, 0x47, 0x01, 0xb8, 0xaf, 0x0e, 0xf1, 0x6a, 0x5d, 0xe3, 0x6c, 0x1a, 0x21, 0x02, 0xb5, 0x68, + 0x6f, 0xb2, 0x3c, 0xd6, 0x78, 0xd9, 0x5f, 0xec, 0x83, 0x7e, 0xe6, 0x1b, 0x82, 0x2a, 0x30, 0xb8, + 0xcd, 0x13, 0x02, 0x75, 0x1c, 0x37, 0x4a, 0x2b, 0x73, 0x0c, 0xc5, 0xe3, 0xa6, 0x41, 0xb1, 0x64, + 0x83, 0x56, 0x61, 0x8a, 0xe7, 0x65, 0x6a, 0x2c, 0x92, 0x86, 0xb3, 0x27, 0xd5, 0xa5, 0x3c, 0x89, + 0xb0, 0x52, 0x1b, 0xaf, 0xa4, 0x49, 0x70, 0x56, 0x39, 0xf4, 0x2a, 0x8c, 0xd1, 0xeb, 0xab, 0xdf, + 0x8e, 0x24, 0x27, 0x9e, 0x91, 0x49, 0x49, 0xf4, 0xeb, 0x06, 0x16, 0x27, 0xa8, 0xd1, 0x2b, 0x30, + 0xda, 0x4a, 0x29, 0x86, 0xfb, 0x63, 0x0d, 0x8a, 0xa9, 0x0c, 0x36, 0x69, 0x99, 0x23, 0x75, 0x9b, + 0xb9, 0x8d, 0xaf, 0x6f, 0x07, 0x24, 0xdc, 0xf6, 0x1b, 0x75, 0x26, 0x39, 0xf6, 0x6b, 0x8e, 0xd4, + 0x09, 0x3c, 0x4e, 0x95, 0xa0, 0x5c, 0x36, 0x1d, 0xb7, 0xd1, 0x0e, 0x48, 0xcc, 0x65, 0xc0, 0xe4, + 0xb2, 0x9c, 0xc0, 0xe3, 0x54, 0x89, 0xee, 0x1a, 0xef, 0xc1, 0xa3, 0xd1, 0x78, 0xdb, 0x3f, 0x53, + 0x00, 0x63, 0x68, 0xbf, 0x73, 0x33, 0x45, 0xd1, 0x2f, 0xdb, 0x0a, 0x5a, 0x35, 0xe1, 0x07, 0x95, + 0xf9, 0x65, 0x71, 0x02, 0x58, 0xfe, 0x65, 0xf4, 0x3f, 0x66, 0xa5, 0xe8, 0x1a, 0x3f, 0x59, 0x09, + 0x7c, 0x7a, 0xc8, 0xc9, 0x78, 0x65, 0xea, 0xbd, 0xc2, 0xa0, 0x7c, 0xcb, 0xdd, 0x21, 0xb2, 0xa7, + 0xf0, 0xe8, 0xe6, 0x1c, 0x0c, 0x97, 0xa1, 0xaa, 0x08, 0xaa, 0x20, 0xb9, 0xa0, 0x4b, 0x30, 0x2c, + 0xd2, 0xff, 0x30, 0xb7, 0x7a, 0xbe, 0x98, 0x98, 0x8b, 0xd3, 0x62, 0x0c, 0xc6, 0x3a, 0x8d, 0xfd, + 0x83, 0x05, 0x98, 0xca, 0x78, 0x17, 0xc5, 0x8f, 0x91, 0x2d, 0x37, 0x8c, 0x54, 0x8e, 0x59, 0xed, + 0x18, 0xe1, 0x70, 0xac, 0x28, 0xe8, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0xde, 0x1d, 0x08, + 0xec, 0x21, 0xb3, 0xb5, 0x9e, 0x83, 0xbe, 0x76, 0x48, 0x64, 0x4c, 0x5e, 0x75, 0x6c, 0x33, 0x73, + 0x30, 0xc3, 0xd0, 0x1b, 0xd8, 0x96, 0xb2, 0xac, 0x6a, 0x37, 0x30, 0x6e, 0x5b, 0xe5, 0x38, 0xda, + 0xb8, 0x88, 0x78, 0x8e, 0x17, 0x89, 0x7b, 0x5a, 0x1c, 0x5c, 0x92, 0x41, 0xb1, 0xc0, 0xda, 0x5f, + 0x28, 0xc2, 0xe9, 0xdc, 0x97, 0x92, 0xb4, 0xe9, 0x4d, 0xdf, 0x73, 0x23, 0x5f, 0xf9, 0x8e, 0xf1, + 0x80, 0x92, 0xa4, 0xb5, 0xbd, 0x2a, 0xe0, 0x58, 0x51, 0xa0, 0xf3, 0xd0, 0xcf, 0x94, 0xc9, 0xa9, + 0x6c, 0xbb, 0xf3, 0x8b, 0x3c, 0xc2, 0x18, 0x47, 0xf7, 0x9c, 0x20, 0xfd, 0x31, 0x2a, 0xc1, 0xf8, + 0x8d, 0xe4, 0x81, 0x42, 0x9b, 0xeb, 0xfb, 0x0d, 0xcc, 0x90, 0xe8, 0x09, 0xd1, 0x5f, 0x09, 0x67, + 0x29, 0xec, 0xd4, 0xfd, 0x50, 0xeb, 0xb4, 0xa7, 0x60, 0x70, 0x87, 0xec, 0x05, 0xae, 0xb7, 0x95, + 0x74, 0xa2, 0xbb, 0xca, 0xc1, 0x58, 0xe2, 0xcd, 0xf4, 0x90, 0x83, 0x47, 0x9d, 0xd9, 0x7c, 0xa8, + 0xab, 0x78, 0xf2, 0xc3, 0x45, 0x18, 0xc7, 0xf3, 0x8b, 0xef, 0x0e, 0xc4, 0x8d, 0xf4, 0x40, 0x1c, + 0x75, 0x66, 0xf3, 0xee, 0xa3, 0xf1, 0x0b, 0x16, 0x8c, 0xb3, 0x24, 0x44, 0x22, 0x1e, 0x82, 0xeb, + 0x7b, 0xc7, 0x70, 0x15, 0x78, 0x0c, 0xfa, 0x03, 0x5a, 0x69, 0x32, 0xcd, 0x2e, 0x6b, 0x09, 0xe6, + 0x38, 0xf4, 0x08, 0xf4, 0xb1, 0x26, 0xd0, 0xc1, 0x1b, 0xe1, 0x5b, 0xf0, 0xa2, 0x13, 0x39, 0x98, + 0x41, 0x59, 
0x7c, 0x2d, 0x4c, 0x5a, 0x0d, 0x97, 0x37, 0x3a, 0x36, 0xf5, 0xbf, 0x33, 0x62, 0x28, + 0x64, 0x36, 0xed, 0xed, 0xc5, 0xd7, 0xca, 0x66, 0xd9, 0xf9, 0x9a, 0xfd, 0xc7, 0x05, 0x38, 0x9b, + 0x59, 0xae, 0xe7, 0xf8, 0x5a, 0x9d, 0x4b, 0x3f, 0xc8, 0x34, 0x33, 0xc5, 0x63, 0x74, 0x51, 0xee, + 0xeb, 0x55, 0xfa, 0xef, 0xef, 0x21, 0xec, 0x55, 0x66, 0x97, 0xbd, 0x43, 0xc2, 0x5e, 0x65, 0xb6, + 0x2d, 0x47, 0x4d, 0xf0, 0xe7, 0x85, 0x9c, 0x6f, 0x61, 0x0a, 0x83, 0x0b, 0x74, 0x9f, 0x61, 0xc8, + 0x50, 0x5e, 0xc2, 0xf9, 0x1e, 0xc3, 0x61, 0x58, 0x61, 0xd1, 0x1c, 0x8c, 0x37, 0x5d, 0x8f, 0x6e, + 0x3e, 0x7b, 0xa6, 0x28, 0xae, 0x6c, 0x00, 0xab, 0x26, 0x1a, 0x27, 0xe9, 0x91, 0xab, 0x85, 0xc4, + 0xe2, 0x5f, 0xf7, 0xca, 0xa1, 0x56, 0xdd, 0xac, 0xe9, 0x06, 0xa1, 0x7a, 0x31, 0x23, 0x3c, 0xd6, + 0xaa, 0xa6, 0x27, 0x2a, 0xf6, 0xae, 0x27, 0x1a, 0xc9, 0xd6, 0x11, 0xcd, 0xbc, 0x02, 0xa3, 0xf7, + 0x6d, 0x53, 0xb0, 0xbf, 0x5e, 0x84, 0x87, 0x3b, 0x2c, 0x7b, 0xbe, 0xd7, 0x1b, 0x63, 0xa0, 0xed, + 0xf5, 0xa9, 0x71, 0xa8, 0xc0, 0x89, 0xcd, 0x76, 0xa3, 0xb1, 0xc7, 0x5e, 0xee, 0x90, 0xba, 0xa4, + 0x10, 0x32, 0xa5, 0x54, 0x8e, 0x9c, 0x58, 0xce, 0xa0, 0xc1, 0x99, 0x25, 0xe9, 0x15, 0x8b, 0x9e, + 0x24, 0x7b, 0x8a, 0x55, 0xe2, 0x8a, 0x85, 0x75, 0x24, 0x36, 0x69, 0xd1, 0x65, 0x98, 0x74, 0x76, + 0x1d, 0x97, 0xc7, 0x15, 0x97, 0x0c, 0xf8, 0x1d, 0x4b, 0xa9, 0x82, 0xe7, 0x92, 0x04, 0x38, 0x5d, + 0x06, 0xbd, 0x06, 0xc8, 0xdf, 0x60, 0xfe, 0xfd, 0xf5, 0xcb, 0xc4, 0x13, 0xd6, 0x6a, 0x36, 0x76, + 0xc5, 0x78, 0x4b, 0xb8, 0x9e, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0xf1, 0x9f, 0x06, 0xf2, 0xe3, 0x3f, + 0x75, 0xde, 0x17, 0xbb, 0x66, 0x38, 0xba, 0x04, 0xa3, 0x87, 0xf4, 0x5a, 0xb5, 0xff, 0x83, 0x45, + 0x4f, 0x3c, 0x5e, 0xc6, 0x0c, 0xae, 0xfa, 0x0a, 0x73, 0xab, 0xe5, 0x9a, 0x65, 0x2d, 0xc0, 0xce, + 0x49, 0xcd, 0xad, 0x36, 0x46, 0x62, 0x93, 0x96, 0xcf, 0x21, 0xcd, 0x1d, 0xd6, 0xb8, 0x15, 0x88, + 0x08, 0x70, 0x8a, 0x02, 0x7d, 0x0c, 0x06, 0xeb, 0xee, 0xae, 0x1b, 0x0a, 0xe5, 0xd8, 0xa1, 0x8d, + 0x58, 0xf1, 0xd6, 0xb9, 0xc8, 0xd9, 0x60, 0xc9, 0xcf, 0xfe, 0xe1, 0x42, 0xdc, 0x27, 0xaf, 0xb7, + 0xfd, 0xc8, 0x39, 0x86, 0x93, 0xfc, 0xb2, 0x71, 0x92, 0x3f, 0xd1, 0x29, 0x0c, 0x1e, 0x6b, 0x52, + 0xee, 0x09, 0x7e, 0x3d, 0x71, 0x82, 0x3f, 0xd9, 0x9d, 0x55, 0xe7, 0x93, 0xfb, 0x1f, 0x59, 0x30, + 0x69, 0xd0, 0x1f, 0xc3, 0x01, 0xb2, 0x6c, 0x1e, 0x20, 0x8f, 0x76, 0xfd, 0x86, 0x9c, 0x83, 0xe3, + 0xfb, 0x8a, 0x89, 0xb6, 0xb3, 0x03, 0xe3, 0x2d, 0xe8, 0xdb, 0x76, 0x82, 0x7a, 0xa7, 0xb4, 0x1f, + 0xa9, 0x42, 0xb3, 0x57, 0x9c, 0x40, 0x58, 0xf8, 0x9f, 0x91, 0xbd, 0x4e, 0x41, 0x5d, 0xad, 0xfb, + 0xac, 0x2a, 0xf4, 0x12, 0x0c, 0x84, 0x35, 0xbf, 0xa5, 0x9e, 0xfa, 0x9c, 0x63, 0x1d, 0xcd, 0x20, + 0x07, 0xfb, 0x65, 0x64, 0x56, 0x47, 0xc1, 0x58, 0xd0, 0xa3, 0x37, 0x60, 0x94, 0xfd, 0x52, 0xee, + 0x76, 0xc5, 0x7c, 0x0d, 0x46, 0x55, 0x27, 0xe4, 0xbe, 0xa8, 0x06, 0x08, 0x9b, 0xac, 0x66, 0xb6, + 0xa0, 0xa4, 0x3e, 0xeb, 0x81, 0x5a, 0x89, 0xff, 0x6d, 0x11, 0xa6, 0x32, 0xe6, 0x1c, 0x0a, 0x8d, + 0x91, 0xb8, 0xd4, 0xe3, 0x54, 0x7d, 0x9b, 0x63, 0x11, 0xb2, 0x0b, 0x54, 0x5d, 0xcc, 0xad, 0x9e, + 0x2b, 0xbd, 0x11, 0x92, 0x64, 0xa5, 0x14, 0xd4, 0xbd, 0x52, 0x5a, 0xd9, 0xb1, 0x75, 0x35, 0xad, + 0x48, 0xb5, 0xf4, 0x81, 0x8e, 0xe9, 0xaf, 0xf5, 0xc1, 0x89, 0xac, 0xc8, 0x9c, 0xe8, 0x33, 0x89, + 0xa4, 0xb3, 0x2f, 0xf4, 0x1a, 0xd3, 0x93, 0x67, 0xa2, 0x15, 0x11, 0x03, 0x67, 0xcd, 0x34, 0xb4, + 0x5d, 0xbb, 0x59, 0xd4, 0xc9, 0x62, 0x96, 0x04, 0x3c, 0x59, 0xb0, 0xdc, 0x3e, 0xde, 0xdf, 0x73, + 0x03, 0x44, 0x96, 0xe1, 0x30, 0xe1, 0xca, 0x23, 0xc1, 0xdd, 0x5d, 0x79, 0x64, 0xcd, 0x68, 0x05, + 0x06, 0x6a, 0xdc, 0x47, 0xa4, 0xd8, 
0x7d, 0x0b, 0xe3, 0x0e, 0x22, 0x6a, 0x03, 0x16, 0x8e, 0x21, + 0x82, 0xc1, 0x8c, 0x0b, 0xc3, 0x5a, 0xc7, 0x3c, 0xd0, 0xc9, 0xb3, 0x43, 0x0f, 0x3e, 0xad, 0x0b, + 0x1e, 0xe8, 0x04, 0xfa, 0x31, 0x0b, 0x12, 0x0f, 0x45, 0x94, 0x52, 0xce, 0xca, 0x55, 0xca, 0x9d, + 0x83, 0xbe, 0xc0, 0x6f, 0x90, 0x64, 0xa2, 0x57, 0xec, 0x37, 0x08, 0x66, 0x18, 0x4a, 0x11, 0xc5, + 0xaa, 0x96, 0x11, 0xfd, 0x1a, 0x29, 0x2e, 0x88, 0x8f, 0x41, 0x7f, 0x83, 0xec, 0x92, 0x46, 0x32, + 0x1f, 0xd7, 0x35, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x42, 0x1f, 0x9c, 0xe9, 0x18, 0x40, 0x88, 0x5e, + 0xc6, 0xb6, 0x9c, 0x88, 0xdc, 0x76, 0xf6, 0x92, 0x89, 0x73, 0x2e, 0x73, 0x30, 0x96, 0x78, 0xf6, + 0x6a, 0x91, 0xc7, 0xbf, 0x4f, 0xa8, 0x30, 0x45, 0xd8, 0x7b, 0x81, 0x35, 0x55, 0x62, 0xc5, 0xa3, + 0x50, 0x89, 0x3d, 0x07, 0x10, 0x86, 0x0d, 0xee, 0x4e, 0x57, 0x17, 0xcf, 0x21, 0xe3, 0x3c, 0x09, + 0xd5, 0x6b, 0x02, 0x83, 0x35, 0x2a, 0xb4, 0x08, 0x13, 0xad, 0xc0, 0x8f, 0xb8, 0x46, 0x78, 0x91, + 0x7b, 0x9c, 0xf6, 0x9b, 0xb1, 0x5b, 0x2a, 0x09, 0x3c, 0x4e, 0x95, 0x40, 0x2f, 0xc2, 0xb0, 0x88, + 0xe7, 0x52, 0xf1, 0xfd, 0x86, 0x50, 0x42, 0x29, 0x27, 0xcc, 0x6a, 0x8c, 0xc2, 0x3a, 0x9d, 0x56, + 0x8c, 0xa9, 0x99, 0x07, 0x33, 0x8b, 0x71, 0x55, 0xb3, 0x46, 0x97, 0x08, 0xf8, 0x3b, 0xd4, 0x53, + 0xc0, 0xdf, 0x58, 0x2d, 0x57, 0xea, 0xd9, 0xea, 0x09, 0x5d, 0x15, 0x59, 0x5f, 0xe9, 0x83, 0x29, + 0x31, 0x71, 0x1e, 0xf4, 0x74, 0xb9, 0x91, 0x9e, 0x2e, 0x47, 0xa1, 0xb8, 0x7b, 0x77, 0xce, 0x1c, + 0xf7, 0x9c, 0xf9, 0x11, 0x0b, 0x4c, 0x49, 0x0d, 0xfd, 0x7f, 0xb9, 0x99, 0xc7, 0x5e, 0xcc, 0x95, + 0xfc, 0x94, 0xc3, 0xe1, 0xdb, 0xcc, 0x41, 0x66, 0xff, 0x3b, 0x0b, 0x1e, 0xed, 0xca, 0x11, 0x2d, + 0x41, 0x89, 0x89, 0x93, 0xda, 0x45, 0xef, 0x49, 0xe5, 0x91, 0x2e, 0x11, 0x39, 0xd2, 0x6d, 0x5c, + 0x12, 0x2d, 0xa5, 0x52, 0xbc, 0x3d, 0x95, 0x91, 0xe2, 0xed, 0xa4, 0xd1, 0x3d, 0xf7, 0x99, 0xe3, + 0xed, 0x87, 0xe8, 0x89, 0x63, 0xbc, 0x06, 0x43, 0xef, 0x37, 0x94, 0x8e, 0x76, 0x42, 0xe9, 0x88, + 0x4c, 0x6a, 0xed, 0x0c, 0xf9, 0x08, 0x4c, 0xb0, 0x40, 0x6f, 0xec, 0x7d, 0x84, 0x78, 0xa7, 0x56, + 0x88, 0x7d, 0xa0, 0xaf, 0x25, 0x70, 0x38, 0x45, 0x6d, 0xff, 0x51, 0x11, 0x06, 0xf8, 0xf2, 0x3b, + 0x86, 0xeb, 0xe5, 0xd3, 0x50, 0x72, 0x9b, 0xcd, 0x36, 0xcf, 0xda, 0xd5, 0x1f, 0x7b, 0xd4, 0xae, + 0x48, 0x20, 0x8e, 0xf1, 0x68, 0x59, 0xe8, 0xbb, 0x3b, 0xc4, 0x92, 0xe5, 0x0d, 0x9f, 0x5d, 0x74, + 0x22, 0x87, 0xcb, 0x4a, 0xea, 0x9c, 0x8d, 0x35, 0xe3, 0xe8, 0x93, 0x00, 0x61, 0x14, 0xb8, 0xde, + 0x16, 0x85, 0x89, 0x10, 0xd6, 0xef, 0xed, 0xc0, 0xad, 0xaa, 0x88, 0x39, 0xcf, 0x78, 0xcf, 0x51, + 0x08, 0xac, 0x71, 0x44, 0xb3, 0xc6, 0x49, 0x3f, 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, 0x31, 0x9b, + 0xf9, 0x00, 0x94, 0x14, 0xf3, 0x6e, 0xda, 0xaf, 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, 0x27, 0xda, + 0x76, 0x28, 0xe5, 0xd9, 0x2f, 0x5a, 0x30, 0xce, 0x1b, 0xb3, 0xe4, 0xed, 0x8a, 0xd3, 0xe0, 0x2e, + 0x9c, 0x68, 0x64, 0xec, 0xca, 0x62, 0xf8, 0x7b, 0xdf, 0xc5, 0x95, 0xb2, 0x2c, 0x0b, 0x8b, 0x33, + 0xeb, 0x40, 0x17, 0xe8, 0x8a, 0xa3, 0xbb, 0xae, 0xd3, 0x10, 0xcf, 0xf2, 0x47, 0xf8, 0x6a, 0xe3, + 0x30, 0xac, 0xb0, 0xf6, 0xef, 0x59, 0x30, 0xc9, 0x5b, 0x7e, 0x95, 0xec, 0xa9, 0xbd, 0xe9, 0x5b, + 0xd9, 0x76, 0x91, 0x2f, 0xb2, 0x90, 0x93, 0x2f, 0x52, 0xff, 0xb4, 0x62, 0xc7, 0x4f, 0xfb, 0xb2, + 0x05, 0x62, 0x86, 0x1c, 0x83, 0x3e, 0xe3, 0xbb, 0x4c, 0x7d, 0xc6, 0x4c, 0xfe, 0x22, 0xc8, 0x51, + 0x64, 0xfc, 0x99, 0x05, 0x13, 0x9c, 0x20, 0xb6, 0xd5, 0x7f, 0x4b, 0xc7, 0xa1, 0x97, 0xac, 0xf2, + 0x57, 0xc9, 0xde, 0xba, 0x5f, 0x71, 0xa2, 0xed, 0xec, 0x8f, 0x32, 0x06, 0xab, 0xaf, 0xe3, 0x60, + 0xd5, 0xe5, 0x02, 0x32, 0xd2, 0x29, 0x75, 0x79, 0x5c, 0x7f, 
0xd8, 0x74, 0x4a, 0xf6, 0x37, 0x2d, + 0x40, 0xbc, 0x1a, 0x43, 0x70, 0xa3, 0xe2, 0x10, 0x83, 0x6a, 0x07, 0x5d, 0xbc, 0x35, 0x29, 0x0c, + 0xd6, 0xa8, 0x8e, 0xa4, 0x7b, 0x12, 0x0e, 0x17, 0xc5, 0xee, 0x0e, 0x17, 0x87, 0xe8, 0xd1, 0x7f, + 0x31, 0x00, 0xc9, 0x17, 0x71, 0xe8, 0x26, 0x8c, 0xd4, 0x9c, 0x96, 0xb3, 0xe1, 0x36, 0xdc, 0xc8, + 0x25, 0x61, 0x27, 0x6f, 0xac, 0x05, 0x8d, 0x4e, 0x98, 0xc8, 0x35, 0x08, 0x36, 0xf8, 0xa0, 0x59, + 0x80, 0x56, 0xe0, 0xee, 0xba, 0x0d, 0xb2, 0xc5, 0xd4, 0x2e, 0x2c, 0x10, 0x08, 0x77, 0x0d, 0x93, + 0x50, 0xac, 0x51, 0x64, 0x84, 0x1f, 0x28, 0x3e, 0xe0, 0xf0, 0x03, 0x70, 0x6c, 0xe1, 0x07, 0xfa, + 0x0e, 0x15, 0x7e, 0x60, 0xe8, 0xd0, 0xe1, 0x07, 0xfa, 0x7b, 0x0a, 0x3f, 0x80, 0xe1, 0x94, 0x94, + 0x3d, 0xe9, 0xff, 0x65, 0xb7, 0x41, 0xc4, 0x85, 0x83, 0x47, 0x2f, 0x99, 0xb9, 0xb7, 0x5f, 0x3e, + 0x85, 0x33, 0x29, 0x70, 0x4e, 0x49, 0xf4, 0x51, 0x98, 0x76, 0x1a, 0x0d, 0xff, 0xb6, 0x1a, 0xd4, + 0xa5, 0xb0, 0xe6, 0x34, 0xb8, 0x09, 0x64, 0x90, 0x71, 0x7d, 0xe4, 0xde, 0x7e, 0x79, 0x7a, 0x2e, + 0x87, 0x06, 0xe7, 0x96, 0x46, 0x1f, 0x82, 0x52, 0x2b, 0xf0, 0x6b, 0xab, 0xda, 0xb3, 0xdd, 0xb3, + 0xb4, 0x03, 0x2b, 0x12, 0x78, 0xb0, 0x5f, 0x1e, 0x55, 0x7f, 0xd8, 0x81, 0x1f, 0x17, 0xc8, 0x88, + 0x27, 0x30, 0x7c, 0xa4, 0xf1, 0x04, 0x76, 0x60, 0xaa, 0x4a, 0x02, 0xd7, 0x69, 0xb8, 0x77, 0xa9, + 0xbc, 0x2c, 0xf7, 0xa7, 0x75, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xa7, 0xf8, 0xae, 0x5a, 0x5e, 0x1b, + 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x65, 0xc1, 0xa0, 0x78, 0x01, 0x77, 0x0c, 0x52, 0xe3, 0x9c, + 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, 0x25, 0x61, 0x8e, 0x78, 0xb4, + 0x13, 0x93, 0xce, 0x86, 0x88, 0xbf, 0x5e, 0xa4, 0xd2, 0xbb, 0xf1, 0x16, 0xfb, 0xc1, 0x77, 0xc1, + 0x1a, 0x0c, 0x86, 0xe2, 0x2d, 0x70, 0x21, 0xff, 0x31, 0x46, 0x72, 0x10, 0x63, 0x2f, 0x3a, 0xf1, + 0xfa, 0x57, 0x32, 0xc9, 0x7c, 0x64, 0x5c, 0x7c, 0x80, 0x8f, 0x8c, 0xbb, 0xbd, 0x56, 0xef, 0x3b, + 0x8a, 0xd7, 0xea, 0xf6, 0xd7, 0xd8, 0xc9, 0xa9, 0xc3, 0x8f, 0x41, 0xa8, 0xba, 0x6c, 0x9e, 0xb1, + 0x76, 0x87, 0x99, 0x25, 0x1a, 0x95, 0x23, 0x5c, 0xfd, 0xbc, 0x05, 0x67, 0x32, 0xbe, 0x4a, 0x93, + 0xb4, 0x9e, 0x81, 0x21, 0xa7, 0x5d, 0x77, 0xd5, 0x5a, 0xd6, 0x4c, 0x93, 0x73, 0x02, 0x8e, 0x15, + 0x05, 0x5a, 0x80, 0x49, 0x72, 0xa7, 0xe5, 0x72, 0x43, 0xae, 0xee, 0x7c, 0x5c, 0xe4, 0xcf, 0x26, + 0x97, 0x92, 0x48, 0x9c, 0xa6, 0x57, 0x71, 0x8d, 0x8a, 0xb9, 0x71, 0x8d, 0xfe, 0x8e, 0x05, 0xc3, + 0xea, 0x35, 0xec, 0x03, 0xef, 0xed, 0x8f, 0x98, 0xbd, 0xfd, 0x70, 0x87, 0xde, 0xce, 0xe9, 0xe6, + 0xdf, 0x29, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x41, 0x82, 0xbb, 0xff, 0x87, 0x13, 0x97, 0x60, + 0xd8, 0x69, 0xb5, 0x24, 0x42, 0x7a, 0xc0, 0xb1, 0x68, 0xdd, 0x31, 0x18, 0xeb, 0x34, 0xea, 0x1d, + 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x2d, 0x12, 0x51, 0x98, 0x70, 0xd8, 0xcd, + 0xdf, 0x6f, 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0x15, 0x2f, 0xba, 0x1e, + 0xf0, 0x2b, 0xa4, 0x16, 0x19, 0x4c, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, 0xf9, 0x81, 0xd5, 0xd1, 0x6f, + 0xba, 0x52, 0xac, 0x09, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd8, 0xe9, 0xc3, 0xfa, 0xf4, 0x70, 0x51, + 0xb1, 0x7e, 0x6a, 0x44, 0x8d, 0x06, 0x33, 0x8a, 0x2e, 0xea, 0xb1, 0xb7, 0x3a, 0x6f, 0xf6, 0xb4, + 0x62, 0xfd, 0x41, 0x62, 0x1c, 0xa0, 0x0b, 0x7d, 0x3c, 0xe5, 0x1e, 0xf3, 0x6c, 0x97, 0x53, 0xe3, + 0x10, 0x0e, 0x31, 0x2c, 0x75, 0x0f, 0x4b, 0x6c, 0xb2, 0x52, 0x11, 0xeb, 0x42, 0x4b, 0xdd, 0x23, + 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa3, 0x38, 0x84, 0xad, 0xa2, 0x0e, 0xb1, + 0x46, 0x81, 0x2e, 0x0a, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x9c, 0x50, 0x28, 0xc8, 0xee, 
0xd2, 0xb4, + 0x40, 0x97, 0x60, 0x58, 0x25, 0x6a, 0xaf, 0xf0, 0xa4, 0x59, 0x62, 0x9a, 0x2d, 0xc5, 0x60, 0xac, + 0xd3, 0xa0, 0x75, 0x18, 0x0f, 0xb9, 0x9e, 0x4d, 0xc5, 0x15, 0xe7, 0xfa, 0xca, 0xf7, 0xaa, 0x77, + 0xc8, 0x26, 0xfa, 0x80, 0x81, 0xf8, 0xee, 0x24, 0xa3, 0x33, 0x24, 0x59, 0xa0, 0x57, 0x61, 0xac, + 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0xd6, 0x3f, 0x43, 0x66, 0xbe, 0xdf, 0x6b, 0x06, + 0x16, 0x27, 0xa8, 0xa9, 0xf0, 0xa6, 0x43, 0x44, 0x74, 0x31, 0xc7, 0xdb, 0x22, 0xa1, 0x48, 0xbb, + 0xcd, 0x84, 0xb7, 0x6b, 0x39, 0x34, 0x38, 0xb7, 0x34, 0x7a, 0x09, 0x46, 0xe4, 0xe7, 0x6b, 0xc1, + 0x4c, 0xe2, 0x27, 0x31, 0x1a, 0x0e, 0x1b, 0x94, 0x28, 0x84, 0x93, 0xf2, 0xff, 0x7a, 0xe0, 0x6c, + 0x6e, 0xba, 0x35, 0xf1, 0xc2, 0x9f, 0x3f, 0xbb, 0xfd, 0xb0, 0x7c, 0x1b, 0xba, 0x94, 0x45, 0x74, + 0xb0, 0x5f, 0x7e, 0x44, 0xf4, 0x5a, 0x26, 0x1e, 0x67, 0xf3, 0x46, 0xab, 0x30, 0xb5, 0x4d, 0x9c, + 0x46, 0xb4, 0xbd, 0xb0, 0x4d, 0x6a, 0x3b, 0x72, 0xc1, 0xb1, 0xf0, 0x28, 0xda, 0xd3, 0x91, 0x2b, + 0x69, 0x12, 0x9c, 0x55, 0x0e, 0xbd, 0x09, 0xd3, 0xad, 0xf6, 0x46, 0xc3, 0x0d, 0xb7, 0xd7, 0xfc, + 0x88, 0x39, 0x21, 0xa9, 0x9c, 0xef, 0x22, 0x8e, 0x8a, 0x0a, 0x40, 0x53, 0xc9, 0xa1, 0xc3, 0xb9, + 0x1c, 0xd0, 0x5d, 0x38, 0x99, 0x98, 0x08, 0x22, 0x92, 0xc4, 0x58, 0x7e, 0x56, 0x91, 0x6a, 0x56, + 0x01, 0x11, 0x94, 0x25, 0x0b, 0x85, 0xb3, 0xab, 0x40, 0x2f, 0x03, 0xb8, 0xad, 0x65, 0xa7, 0xe9, + 0x36, 0xe8, 0x55, 0x71, 0x8a, 0xcd, 0x11, 0x7a, 0x6d, 0x80, 0x95, 0x8a, 0x84, 0xd2, 0xbd, 0x59, + 0xfc, 0xdb, 0xc3, 0x1a, 0x35, 0xba, 0x06, 0x63, 0xe2, 0xdf, 0x9e, 0x18, 0x52, 0x1e, 0xd0, 0xe4, + 0x71, 0x16, 0x8d, 0xaa, 0xa2, 0x63, 0x0e, 0x52, 0x10, 0x9c, 0x28, 0x8b, 0xb6, 0xe0, 0x8c, 0xcc, + 0x10, 0xa7, 0xcf, 0x4f, 0x39, 0x06, 0x21, 0x4b, 0xe5, 0x31, 0xc4, 0x5f, 0xa5, 0xcc, 0x75, 0x22, + 0xc4, 0x9d, 0xf9, 0xd0, 0x73, 0x5d, 0x9f, 0xe6, 0xfc, 0xc9, 0xef, 0x49, 0xee, 0xe1, 0x44, 0xcf, + 0xf5, 0x6b, 0x49, 0x24, 0x4e, 0xd3, 0x23, 0x1f, 0x4e, 0xba, 0x5e, 0xd6, 0xac, 0x3e, 0xc5, 0x18, + 0x7d, 0x90, 0xbf, 0x76, 0xee, 0x3c, 0xa3, 0x33, 0xf1, 0x38, 0x9b, 0xef, 0xdb, 0xf3, 0xfb, 0xfb, + 0x5d, 0x8b, 0x96, 0xd6, 0xa4, 0x73, 0xf4, 0x29, 0x18, 0xd1, 0x3f, 0x4a, 0x48, 0x1a, 0xe7, 0xb3, + 0x85, 0x57, 0x6d, 0x4f, 0xe0, 0xb2, 0xbd, 0x5a, 0xf7, 0x3a, 0x0e, 0x1b, 0x1c, 0x51, 0x2d, 0x23, + 0x26, 0xc0, 0xc5, 0xde, 0x24, 0x99, 0xde, 0xdd, 0xde, 0x08, 0x64, 0x4f, 0x77, 0x74, 0x0d, 0x86, + 0x6a, 0x0d, 0x97, 0x78, 0xd1, 0x4a, 0xa5, 0x53, 0xd4, 0xc3, 0x05, 0x41, 0x23, 0xd6, 0x8f, 0xc8, + 0xca, 0xc1, 0x61, 0x58, 0x71, 0xb0, 0x7f, 0xa3, 0x00, 0xe5, 0x2e, 0x29, 0x5e, 0x12, 0x66, 0x28, + 0xab, 0x27, 0x33, 0xd4, 0x1c, 0x8c, 0xc7, 0xff, 0x74, 0x0d, 0x97, 0xf2, 0x64, 0xbd, 0x69, 0xa2, + 0x71, 0x92, 0xbe, 0xe7, 0x47, 0x09, 0xba, 0x25, 0xab, 0xaf, 0xeb, 0xb3, 0x1a, 0xc3, 0x82, 0xdd, + 0xdf, 0xfb, 0xb5, 0x37, 0xd7, 0x1a, 0x69, 0x7f, 0xad, 0x00, 0x27, 0x55, 0x17, 0x7e, 0xe7, 0x76, + 0xdc, 0x8d, 0x74, 0xc7, 0x1d, 0x81, 0x2d, 0xd7, 0xbe, 0x0e, 0x03, 0x3c, 0x8c, 0x63, 0x0f, 0xe2, + 0xf6, 0x63, 0x66, 0x70, 0x67, 0x25, 0xe1, 0x19, 0x01, 0x9e, 0x7f, 0xc0, 0x82, 0xf1, 0xc4, 0xeb, + 0x36, 0x84, 0xb5, 0x27, 0xd0, 0xf7, 0x23, 0x12, 0x67, 0x09, 0xdb, 0xe7, 0xa0, 0x6f, 0xdb, 0x0f, + 0xa3, 0xa4, 0xa3, 0xc7, 0x15, 0x3f, 0x8c, 0x30, 0xc3, 0xd8, 0xbf, 0x6f, 0x41, 0xff, 0xba, 0xe3, + 0x7a, 0x91, 0x34, 0x0a, 0x58, 0x39, 0x46, 0x81, 0x5e, 0xbe, 0x0b, 0xbd, 0x08, 0x03, 0x64, 0x73, + 0x93, 0xd4, 0x22, 0x31, 0xaa, 0x32, 0xf4, 0xc4, 0xc0, 0x12, 0x83, 0x52, 0xf9, 0x8f, 0x55, 0xc6, + 0xff, 0x62, 0x41, 0x8c, 0x6e, 0x41, 0x29, 0x72, 0x9b, 0x64, 0xae, 0x5e, 0x17, 0xa6, 0xf2, 0xfb, + 0x08, 0x9f, 
0xb1, 0x2e, 0x19, 0xe0, 0x98, 0x97, 0xfd, 0x85, 0x02, 0x40, 0x1c, 0xff, 0xaa, 0xdb, + 0x27, 0xce, 0xa7, 0x8c, 0xa8, 0xe7, 0x33, 0x8c, 0xa8, 0x28, 0x66, 0x98, 0x61, 0x41, 0x55, 0xdd, + 0x54, 0xec, 0xa9, 0x9b, 0xfa, 0x0e, 0xd3, 0x4d, 0x0b, 0x30, 0x19, 0xc7, 0xef, 0x32, 0xc3, 0x17, + 0xb2, 0xa3, 0x73, 0x3d, 0x89, 0xc4, 0x69, 0x7a, 0x9b, 0xc0, 0x39, 0x15, 0xc6, 0x48, 0x9c, 0x68, + 0xcc, 0x0f, 0x5c, 0x37, 0x4a, 0x77, 0xe9, 0xa7, 0xd8, 0x4a, 0x5c, 0xc8, 0xb5, 0x12, 0xff, 0xa4, + 0x05, 0x27, 0x92, 0xf5, 0xb0, 0x47, 0xd3, 0x9f, 0xb7, 0xe0, 0x24, 0xb3, 0x95, 0xb3, 0x5a, 0xd3, + 0x96, 0xf9, 0x17, 0x3a, 0x86, 0x66, 0xca, 0x69, 0x71, 0x1c, 0xe3, 0x64, 0x35, 0x8b, 0x35, 0xce, + 0xae, 0xd1, 0xfe, 0x9f, 0x7d, 0x30, 0x9d, 0x17, 0xd3, 0x89, 0x3d, 0x13, 0x71, 0xee, 0x54, 0x77, + 0xc8, 0x6d, 0xe1, 0x8c, 0x1f, 0x3f, 0x13, 0xe1, 0x60, 0x2c, 0xf1, 0xc9, 0xac, 0x1d, 0x85, 0x1e, + 0xb3, 0x76, 0x6c, 0xc3, 0xe4, 0xed, 0x6d, 0xe2, 0xdd, 0xf0, 0x42, 0x27, 0x72, 0xc3, 0x4d, 0x97, + 0xd9, 0x95, 0xf9, 0xbc, 0x91, 0xa9, 0x7e, 0x27, 0x6f, 0x25, 0x09, 0x0e, 0xf6, 0xcb, 0x67, 0x0c, + 0x40, 0xdc, 0x64, 0xbe, 0x91, 0xe0, 0x34, 0xd3, 0x74, 0xd2, 0x93, 0xbe, 0x07, 0x9c, 0xf4, 0xa4, + 0xe9, 0x0a, 0x6f, 0x14, 0xf9, 0x06, 0x80, 0xdd, 0x18, 0x57, 0x15, 0x14, 0x6b, 0x14, 0xe8, 0x13, + 0x80, 0xf4, 0xa4, 0x4e, 0x46, 0x48, 0xcd, 0x67, 0xef, 0xed, 0x97, 0xd1, 0x5a, 0x0a, 0x7b, 0xb0, + 0x5f, 0x9e, 0xa2, 0xd0, 0x15, 0x8f, 0xde, 0x3c, 0xe3, 0x38, 0x64, 0x19, 0x8c, 0xd0, 0x2d, 0x98, + 0xa0, 0x50, 0xb6, 0xa2, 0x64, 0xbc, 0x4e, 0x7e, 0x5b, 0x7c, 0xfa, 0xde, 0x7e, 0x79, 0x62, 0x2d, + 0x81, 0xcb, 0x63, 0x9d, 0x62, 0x82, 0x5e, 0x86, 0xb1, 0x78, 0x5e, 0x5d, 0x25, 0x7b, 0x3c, 0x3e, + 0x4e, 0x89, 0x2b, 0xbc, 0x57, 0x0d, 0x0c, 0x4e, 0x50, 0xda, 0x9f, 0xb7, 0xe0, 0x74, 0x6e, 0xe2, + 0x71, 0x74, 0x01, 0x86, 0x9c, 0x96, 0xcb, 0xcd, 0x17, 0xe2, 0xa8, 0x61, 0x6a, 0xb2, 0xca, 0x0a, + 0x37, 0x5e, 0x28, 0x2c, 0xdd, 0xe1, 0x77, 0x5c, 0xaf, 0x9e, 0xdc, 0xe1, 0xaf, 0xba, 0x5e, 0x1d, + 0x33, 0x8c, 0x3a, 0xb2, 0x8a, 0xb9, 0x4f, 0x11, 0xbe, 0x42, 0xd7, 0x6a, 0x46, 0x8a, 0xf2, 0xe3, + 0x6d, 0x06, 0x7a, 0x5a, 0x37, 0x35, 0x0a, 0xaf, 0xc2, 0x5c, 0x33, 0xe3, 0xf7, 0x5b, 0x20, 0x9e, + 0x2e, 0xf7, 0x70, 0x26, 0xbf, 0x01, 0x23, 0xbb, 0xe9, 0x84, 0x77, 0xe7, 0xf2, 0xdf, 0x72, 0x8b, + 0x40, 0xe1, 0x4a, 0xd0, 0x36, 0x92, 0xdb, 0x19, 0xbc, 0xec, 0x3a, 0x08, 0xec, 0x22, 0x61, 0x06, + 0x85, 0xee, 0xad, 0x79, 0x0e, 0xa0, 0xce, 0x68, 0x59, 0x16, 0xdc, 0x82, 0x29, 0x71, 0x2d, 0x2a, + 0x0c, 0xd6, 0xa8, 0xec, 0x7f, 0x55, 0x80, 0x61, 0x99, 0x60, 0xad, 0xed, 0xf5, 0xa2, 0xf6, 0x3b, + 0x54, 0xc6, 0x65, 0x74, 0x11, 0x4a, 0x4c, 0x2f, 0x5d, 0x89, 0xb5, 0xa5, 0x4a, 0x2b, 0xb4, 0x2a, + 0x11, 0x38, 0xa6, 0xa1, 0xbb, 0x63, 0xd8, 0xde, 0x60, 0xe4, 0x89, 0x87, 0xb6, 0x55, 0x0e, 0xc6, + 0x12, 0x8f, 0x3e, 0x0a, 0x13, 0xbc, 0x5c, 0xe0, 0xb7, 0x9c, 0x2d, 0x6e, 0xcb, 0xea, 0x57, 0xd1, + 0x4b, 0x26, 0x56, 0x13, 0xb8, 0x83, 0xfd, 0xf2, 0x89, 0x24, 0x8c, 0x19, 0x69, 0x53, 0x5c, 0x98, + 0xcb, 0x1a, 0xaf, 0x84, 0xee, 0xea, 0x29, 0x4f, 0xb7, 0x18, 0x85, 0x75, 0x3a, 0xfb, 0x53, 0x80, + 0xd2, 0xa9, 0xe6, 0xd0, 0x6b, 0xdc, 0xe5, 0xd9, 0x0d, 0x48, 0xbd, 0x93, 0xd1, 0x56, 0x8f, 0xd1, + 0x21, 0xdf, 0xc8, 0xf1, 0x52, 0x58, 0x95, 0xb7, 0xff, 0x52, 0x11, 0x26, 0x92, 0x51, 0x01, 0xd0, + 0x15, 0x18, 0xe0, 0x22, 0xa5, 0x60, 0xdf, 0xc1, 0x27, 0x48, 0x8b, 0x25, 0xc0, 0x0e, 0x57, 0x21, + 0x95, 0x8a, 0xf2, 0xe8, 0x4d, 0x18, 0xae, 0xfb, 0xb7, 0xbd, 0xdb, 0x4e, 0x50, 0x9f, 0xab, 0xac, + 0x88, 0xe9, 0x9c, 0xa9, 0xa8, 0x58, 0x8c, 0xc9, 0xf4, 0xf8, 0x04, 0xcc, 0xfe, 0x1d, 0xa3, 0xb0, + 0xce, 0x0e, 0xad, 0xb3, 0xfc, 0x14, 
0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xea, 0xf4, 0xfe, 0x65, 0x41, + 0x12, 0x69, 0x9c, 0x47, 0x45, 0x12, 0x0b, 0x8e, 0xc0, 0x31, 0x23, 0xf4, 0x19, 0x98, 0x0a, 0x73, + 0x4c, 0x27, 0x79, 0x99, 0x47, 0x3b, 0x59, 0x13, 0xe6, 0x1f, 0xba, 0xb7, 0x5f, 0x9e, 0xca, 0x32, + 0xb2, 0x64, 0x55, 0x63, 0x7f, 0xf1, 0x04, 0x18, 0x8b, 0xd8, 0x48, 0x44, 0x6d, 0x1d, 0x51, 0x22, + 0x6a, 0x0c, 0x43, 0xa4, 0xd9, 0x8a, 0xf6, 0x16, 0xdd, 0x40, 0x8c, 0x49, 0x26, 0xcf, 0x25, 0x41, + 0x93, 0xe6, 0x29, 0x31, 0x58, 0xf1, 0xc9, 0xce, 0x16, 0x5e, 0xfc, 0x16, 0x66, 0x0b, 0xef, 0x3b, + 0xc6, 0x6c, 0xe1, 0x6b, 0x30, 0xb8, 0xe5, 0x46, 0x98, 0xb4, 0x7c, 0x71, 0x99, 0xcb, 0x9c, 0x87, + 0x97, 0x39, 0x49, 0x3a, 0x2f, 0xad, 0x40, 0x60, 0xc9, 0x04, 0xbd, 0xa6, 0x56, 0xe0, 0x40, 0xbe, + 0xc2, 0x25, 0xed, 0xbc, 0x92, 0xb9, 0x06, 0x45, 0x4e, 0xf0, 0xc1, 0xfb, 0xcd, 0x09, 0xbe, 0x2c, + 0x33, 0x79, 0x0f, 0xe5, 0x3f, 0x56, 0x63, 0x89, 0xba, 0xbb, 0xe4, 0xef, 0xbe, 0xa9, 0x67, 0x3f, + 0x2f, 0xe5, 0xef, 0x04, 0x2a, 0xb1, 0x79, 0x8f, 0x39, 0xcf, 0xbf, 0xdf, 0x82, 0x93, 0xc9, 0xec, + 0xa4, 0xec, 0x4d, 0x85, 0xf0, 0xf3, 0x78, 0xb1, 0x97, 0x74, 0xb1, 0xac, 0x80, 0x51, 0x21, 0xd3, + 0x91, 0x66, 0x92, 0xe1, 0xec, 0xea, 0x68, 0x47, 0x07, 0x1b, 0x75, 0xe1, 0x6f, 0xf0, 0x58, 0x4e, + 0xf2, 0xf4, 0x0e, 0x29, 0xd3, 0xd7, 0x33, 0x12, 0x75, 0x3f, 0x9e, 0x97, 0xa8, 0xbb, 0xe7, 0xf4, + 0xdc, 0xaf, 0xa9, 0xb4, 0xe9, 0xa3, 0xf9, 0x53, 0x89, 0x27, 0x45, 0xef, 0x9a, 0x2c, 0xfd, 0x35, + 0x95, 0x2c, 0xbd, 0x43, 0x44, 0x6e, 0x9e, 0x0a, 0xbd, 0x6b, 0x8a, 0x74, 0x2d, 0xcd, 0xf9, 0xf8, + 0xd1, 0xa4, 0x39, 0x37, 0x8e, 0x1a, 0x9e, 0x69, 0xfb, 0xe9, 0x2e, 0x47, 0x8d, 0xc1, 0xb7, 0xf3, + 0x61, 0xc3, 0x53, 0xba, 0x4f, 0xde, 0x57, 0x4a, 0xf7, 0x9b, 0x7a, 0x8a, 0x74, 0xd4, 0x25, 0x07, + 0x38, 0x25, 0xea, 0x31, 0x31, 0xfa, 0x4d, 0xfd, 0x00, 0x9c, 0xca, 0xe7, 0xab, 0xce, 0xb9, 0x34, + 0xdf, 0xcc, 0x23, 0x30, 0x95, 0x70, 0xfd, 0xc4, 0xf1, 0x24, 0x5c, 0x3f, 0x79, 0xe4, 0x09, 0xd7, + 0x4f, 0x1d, 0x43, 0xc2, 0xf5, 0x87, 0x8e, 0x31, 0xe1, 0xfa, 0x4d, 0xe6, 0x1c, 0xc5, 0x03, 0x40, + 0x89, 0x08, 0xe2, 0x4f, 0xe5, 0xc4, 0x4f, 0x4b, 0x47, 0x89, 0xe2, 0x1f, 0xa7, 0x50, 0x38, 0x66, + 0x95, 0x91, 0xc8, 0x7d, 0xfa, 0x01, 0x24, 0x72, 0x5f, 0x8b, 0x13, 0xb9, 0x9f, 0xce, 0x1f, 0xea, + 0x8c, 0xe7, 0x34, 0x39, 0xe9, 0xdb, 0x6f, 0xea, 0x69, 0xd7, 0x1f, 0xee, 0x60, 0x05, 0xcb, 0x52, + 0x28, 0x77, 0x48, 0xb6, 0xfe, 0x2a, 0x4f, 0xb6, 0xfe, 0x48, 0xfe, 0x4e, 0x9e, 0x3c, 0xee, 0x8c, + 0x14, 0xeb, 0xb4, 0x5d, 0x2a, 0xf6, 0x2a, 0x8b, 0x95, 0x9e, 0xd3, 0x2e, 0x15, 0xbc, 0x35, 0xdd, + 0x2e, 0x85, 0xc2, 0x31, 0x2b, 0xfb, 0x07, 0x0b, 0x70, 0xb6, 0xf3, 0x7a, 0x8b, 0xb5, 0xe4, 0x95, + 0xd8, 0x21, 0x20, 0xa1, 0x25, 0xe7, 0x77, 0xb6, 0x98, 0xaa, 0xe7, 0x78, 0x90, 0x97, 0x61, 0x52, + 0xbd, 0xc3, 0x69, 0xb8, 0xb5, 0xbd, 0xb5, 0xf8, 0x9a, 0xac, 0x22, 0x27, 0x54, 0x93, 0x04, 0x38, + 0x5d, 0x06, 0xcd, 0xc1, 0xb8, 0x01, 0x5c, 0x59, 0x14, 0x77, 0xb3, 0x38, 0x3a, 0xb7, 0x89, 0xc6, + 0x49, 0x7a, 0xfb, 0x4b, 0x16, 0x3c, 0x94, 0x93, 0xa9, 0xb4, 0xe7, 0x70, 0x87, 0x9b, 0x30, 0xde, + 0x32, 0x8b, 0x76, 0x89, 0xd0, 0x6a, 0xe4, 0x43, 0x55, 0x6d, 0x4d, 0x20, 0x70, 0x92, 0xa9, 0xfd, + 0xb3, 0x05, 0x38, 0xd3, 0xd1, 0xb1, 0x14, 0x61, 0x38, 0xb5, 0xd5, 0x0c, 0x9d, 0x85, 0x80, 0xd4, + 0x89, 0x17, 0xb9, 0x4e, 0xa3, 0xda, 0x22, 0x35, 0xcd, 0xce, 0xc1, 0x3c, 0x34, 0x2f, 0xaf, 0x56, + 0xe7, 0xd2, 0x14, 0x38, 0xa7, 0x24, 0x5a, 0x06, 0x94, 0xc6, 0x88, 0x11, 0x66, 0x51, 0xf7, 0xd3, + 0xfc, 0x70, 0x46, 0x09, 0xf4, 0x01, 0x18, 0x55, 0x0e, 0xab, 0xda, 0x88, 0xb3, 0x8d, 0x1d, 0xeb, + 0x08, 0x6c, 0xd2, 0xa1, 0x4b, 0x3c, 0x6d, 0x83, 0x48, 0xf0, 
0x21, 0x8c, 0x22, 0xe3, 0x32, 0x27, + 0x83, 0x00, 0x63, 0x9d, 0x66, 0xfe, 0xa5, 0xdf, 0xfc, 0xc6, 0xd9, 0xf7, 0xfc, 0xf6, 0x37, 0xce, + 0xbe, 0xe7, 0xf7, 0xbe, 0x71, 0xf6, 0x3d, 0xdf, 0x7d, 0xef, 0xac, 0xf5, 0x9b, 0xf7, 0xce, 0x5a, + 0xbf, 0x7d, 0xef, 0xac, 0xf5, 0x7b, 0xf7, 0xce, 0x5a, 0x7f, 0x70, 0xef, 0xac, 0xf5, 0x85, 0x3f, + 0x3c, 0xfb, 0x9e, 0x37, 0x50, 0x1c, 0x40, 0xf4, 0x22, 0x1d, 0x9d, 0x8b, 0xbb, 0x97, 0xfe, 0x5f, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x6c, 0x51, 0x7f, 0x2c, 0x10, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -8714,6 +8752,22 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -9022,6 +9076,39 @@ func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerResizePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResizePolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9231,6 +9318,47 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.AllocatedResources) > 0 { + keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources)) + for k := range m.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- { + v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatedResources[iNdEx]) + copy(dAtA[i:], keysForAllocatedResources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } if m.Started != nil { i-- if *m.Started { @@ -9977,6 +10105,22 @@ func (m *EphemeralContainerCommon) 
MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -15792,6 +15936,11 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Resize) + copy(dAtA[i:], m.Resize) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resize))) + i-- + dAtA[i] = 0x72 if len(m.EphemeralContainerStatuses) > 0 { for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { { @@ -20815,6 +20964,12 @@ func (m *Container) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -20851,6 +21006,19 @@ func (m *ContainerPort) Size() (n int) { return n } +func (m *ContainerResizePolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ContainerState) Size() (n int) { if m == nil { return 0 @@ -20940,6 +21108,19 @@ func (m *ContainerStatus) Size() (n int) { if m.Started != nil { n += 2 } + if len(m.AllocatedResources) > 0 { + for k, v := range m.AllocatedResources { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -21282,6 +21463,12 @@ func (m *EphemeralContainerCommon) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -23363,6 +23550,8 @@ func (m *PodStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.Resize) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -25367,6 +25556,11 @@ func (this *Container) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&Container{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -25390,6 +25584,7 @@ func (this *Container) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `}`, }, "") return s @@ -25419,6 +25614,17 @@ func (this *ContainerPort) String() string { }, 
"") return s } +func (this *ContainerResizePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResizePolicy{`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `}`, + }, "") + return s +} func (this *ContainerState) String() string { if this == nil { return "nil" @@ -25472,6 +25678,16 @@ func (this *ContainerStatus) String() string { if this == nil { return "nil" } + keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) + for k := range this.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + mapStringForAllocatedResources := "ResourceList{" + for _, k := range keysForAllocatedResources { + mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)]) + } + mapStringForAllocatedResources += "}" s := strings.Join([]string{`&ContainerStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, @@ -25482,6 +25698,8 @@ func (this *ContainerStatus) String() string { `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, `Started:` + valueToStringGenerated(this.Started) + `,`, + `AllocatedResources:` + mapStringForAllocatedResources + `,`, + `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, `}`, }, "") return s @@ -25713,6 +25931,11 @@ func (this *EphemeralContainerCommon) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&EphemeralContainerCommon{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -25736,6 +25959,7 @@ func (this *EphemeralContainerCommon) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, `}`, }, "") return s @@ -27323,6 +27547,7 @@ func (this *PodStatus) String() string { `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, `PodIPs:` + repeatedStringForPodIPs + `,`, `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `Resize:` + fmt.Sprintf("%v", this.Resize) + `,`, `}`, }, "") return s @@ -33866,6 +34091,40 @@ func (m *Container) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34172,6 +34431,120 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } +func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ContainerState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -35060,6 +35433,171 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Started = &b + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllocatedResources == nil { + m.AllocatedResources = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AllocatedResources[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -37706,6 +38244,40 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field ResizePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -55879,6 +56451,38 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resize = PodResizeStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -65016,7 +65620,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 12: if wireType != 0 { @@ -65274,7 +65878,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ServiceInternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex]) m.InternalTrafficPolicy = &s iNdEx = postIndex default: diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 22bc1c801b..8ef67ca40b 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -220,7 +220,6 @@ message CSIPersistentVolumeSource { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an beta field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional @@ -229,9 +228,10 @@ message CSIPersistentVolumeSource { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is an alpha field and requires enabling CSINodeExpandSecret feature gate. + // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. 
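The raw byte constants in the generated marshal code above fall directly out of the protobuf wire format: every field is prefixed with varint(fieldNumber<<3 | wireType), and `MarshalToSizedBuffer` writes it back-to-front because it fills the buffer from the end. A minimal sketch (the `protoTag` helper is illustrative only, not part of the vendored code) that reproduces the tags added in this diff — `0xba 0x01` for `resizePolicy` (field 23), `0x52`/`0x5a` for `allocatedResources`/`resources` (fields 10/11), and `0x72` for `resize` (field 14):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// protoTag returns the varint-encoded protobuf tag for a field number and
// wire type, i.e. varint(fieldNumber<<3 | wireType). Illustrative helper only.
func protoTag(fieldNumber, wireType uint64) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, fieldNumber<<3|wireType)
	return buf[:n]
}

func main() {
	fmt.Printf("% x\n", protoTag(23, 2)) // ba 01 -> Container/EphemeralContainerCommon resizePolicy
	fmt.Printf("% x\n", protoTag(10, 2)) // 52    -> ContainerStatus allocatedResources
	fmt.Printf("% x\n", protoTag(11, 2)) // 5a    -> ContainerStatus resources
	fmt.Printf("% x\n", protoTag(14, 2)) // 72    -> PodStatus resize
}
```

In the generated code the two bytes of the field-23 tag appear reversed (`dAtA[i] = 0x1; i--; dAtA[i] = 0xba`) for the same reason: the buffer is written from the back, so the finished slice reads `0xba 0x01`.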
+ // +featureGate=CSINodeExpandSecret // +optional optional SecretReference nodeExpandSecretRef = 10; } @@ -723,6 +723,12 @@ message Container { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional @@ -863,6 +869,17 @@ message ContainerPort { optional string hostIP = 5; } +// ContainerResizePolicy represents resource resize policy for the container. +message ContainerResizePolicy { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + optional string resourceName = 1; + + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + optional string restartPolicy = 2; +} + // ContainerState holds a possible state of container. // Only one of its members may be specified. // If none of them is specified, the default one is ContainerStateWaiting. @@ -930,41 +947,76 @@ message ContainerStateWaiting { // ContainerStatus contains details for the current status of this container. message ContainerStatus { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. + // Each container in a pod must have a unique name across all container types. // Cannot be updated. optional string name = 1; - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional optional ContainerState state = 2; - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional optional ContainerState lastState = 3; - // Specifies whether the container has passed its readiness probe. + // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. optional bool ready = 4; - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. optional int32 restartCount = 5; - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. optional string image = 6; - // ImageID of the container's image. + // ImageID is the image ID of the container's image. The image ID may not + // match the image ID of the image used in the PodSpec, as it may have been + // resolved by the runtime. 
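The new `resizePolicy` list (field 23) and the `ContainerResizePolicy` message introduced above surface in the Go API as a slice on `Container`. A hedged sketch of setting it with the vendored `k8s.io/api/core/v1` types: the image name is a placeholder, and `"RestartContainer"` is the other restart-policy value defined by the in-place-resize feature — only `NotRequired`, the documented default, is named in this diff.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name:  "app",
		Image: "registry.example.com/app:latest", // placeholder image
		// One entry per resource; gated by InPlacePodVerticalScaling.
		ResizePolicy: []corev1.ContainerResizePolicy{
			{
				ResourceName:  corev1.ResourceCPU,
				RestartPolicy: corev1.ResourceResizeRestartPolicy("NotRequired"),
			},
			{
				ResourceName: corev1.ResourceMemory,
				// "RestartContainer" is assumed from the feature's API; it is not shown in this diff.
				RestartPolicy: corev1.ResourceResizeRestartPolicy("RestartContainer"),
			},
		},
	}
	fmt.Printf("%+v\n", c.ResizePolicy)
}
```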
optional string imageID = 7; - // Container's ID in the format '://'. + // ContainerID is the ID of the container in the format '://'. + // Where type is a container runtime identifier, returned from Version call of CRI API + // (for example "containerd"). // +optional optional string containerID = 8; - // Specifies whether the container has passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. + // Started indicates whether the container has finished its postStart lifecycle hook + // and passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered + // successful. Resets to false when the container is restarted, or if kubelet + // loses state temporarily. In both cases, startup probes will run again. + // Is always true when no startupProbe is defined and container is running and + // has passed the postStart lifecycle hook. The null value must be treated the + // same as false. // +optional optional bool started = 9; + + // AllocatedResources represents the compute resources allocated for this container by the + // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission + // and after successfully admitting desired pod resize. + // +featureGate=InPlacePodVerticalScaling + // +optional + map allocatedResources = 10; + + // Resources represents the compute resource requests and limits that have been successfully + // enacted on the running container after it has been started or has been successfully resized. + // +featureGate=InPlacePodVerticalScaling + // +optional + optional ResourceRequirements resources = 11; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -1040,7 +1092,7 @@ message EmptyDirVolumeSource { // The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; } @@ -1049,11 +1101,8 @@ message EmptyDirVolumeSource { // +structType=atomic message EndpointAddress { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). optional string ip = 1; // The Hostname of this endpoint @@ -1089,10 +1138,17 @@ message EndpointPort { optional string protocol = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. 
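`ContainerStatus` gains `allocatedResources` (field 10, a ResourceList the kubelet sets on admission and after an accepted resize) and `resources` (field 11, the requests and limits actually enacted on the running container). A small sketch, assuming the vendored `k8s.io/api/core/v1` package, that reads both from a pod status:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// printAllocated walks a pod's container statuses and prints the node-allocated
// resources and the resources enacted on each running container. Both fields
// are gated by InPlacePodVerticalScaling.
func printAllocated(pod *corev1.Pod) {
	for _, cs := range pod.Status.ContainerStatuses {
		for name, qty := range cs.AllocatedResources {
			fmt.Printf("%s: allocated %s=%s\n", cs.Name, name, qty.String())
		}
		if cs.Resources != nil {
			for name, qty := range cs.Resources.Requests {
				fmt.Printf("%s: enacted request %s=%s\n", cs.Name, name, qty.String())
			}
		}
	}
}

func main() {
	printAllocated(&corev1.Pod{}) // no statuses: prints nothing
}
```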
- // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 4; @@ -1324,6 +1380,12 @@ message EphemeralContainerCommon { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional @@ -2485,6 +2547,10 @@ message NodeStatus { // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -2666,7 +2732,7 @@ message PersistentVolumeClaim { optional PersistentVolumeClaimStatus status = 3; } -// PersistentVolumeClaimCondition contails details about state of pvc +// PersistentVolumeClaimCondition contains details about state of pvc message PersistentVolumeClaimCondition { optional string type = 1; @@ -3554,7 +3620,7 @@ message PodSpec { repeated EphemeralContainer ephemeralContainers = 34; // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. + // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3809,14 +3875,19 @@ message PodSpec { optional bool hostUsers = 37; // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - // More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // This is a beta feature enabled by the PodSchedulingReadiness feature gate. // - // This is an alpha-level feature enabled by PodSchedulingReadiness feature gate. 
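The reworded `schedulingGates` comment above describes the beta behavior: while the list is non-empty the pod stays in the SchedulingGated state, and gates can only be set at creation and removed afterwards. A hedged sketch of a gated PodSpec using the vendored types; the gate name is purely illustrative:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			// While this list is non-empty the scheduler leaves the pod
			// SchedulingGated; entries can only be removed after creation.
			// The gate name below is illustrative, not a real gate.
			SchedulingGates: []corev1.PodSchedulingGate{
				{Name: "example.com/capacity-reserved"},
			},
		},
	}
	fmt.Println(len(pod.Spec.SchedulingGates)) // 1
}
```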
- // +optional // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name + // +featureGate=PodSchedulingReadiness + // +optional repeated PodSchedulingGate schedulingGates = 38; // ResourceClaims defines which ResourceClaims must be allocated @@ -3924,13 +3995,20 @@ message PodStatus { // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional optional string qosClass = 9; // Status for any ephemeral containers that have run in this pod. // +optional repeated ContainerStatus ephemeralContainerStatuses = 13; + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + optional string resize = 14; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded @@ -4123,8 +4201,6 @@ message ProbeHandler { optional TCPSocketAction tcpSocket = 3; // GRPC specifies an action involving a GRPC port. - // This is a beta field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional optional GRPCAction grpc = 4; } @@ -4374,6 +4450,7 @@ message ReplicationControllerSpec { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional optional PodTemplateSpec template = 3; @@ -4502,7 +4579,7 @@ message ResourceRequirements { // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional map requests = 2; @@ -5629,8 +5706,12 @@ message TopologySpreadConstraint { // spreading will be calculated. The keys are used to lookup values from the // incoming pod labels, those key-value labels are ANDed with labelSelector // to select the group of existing pods over which spreading will be calculated - // for the incoming pod. Keys that don't exist in the incoming pod labels will + // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). 
// +listType=atomic // +optional repeated string matchLabelKeys = 8; diff --git a/vendor/k8s.io/api/core/v1/toleration.go b/vendor/k8s.io/api/core/v1/toleration.go index 9341abf891..e803d518b5 100644 --- a/vendor/k8s.io/api/core/v1/toleration.go +++ b/vendor/k8s.io/api/core/v1/toleration.go @@ -28,15 +28,13 @@ func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { // ToleratesTaint checks if the toleration tolerates the taint. // The matching follows the rules below: -// (1) Empty toleration.effect means to match all taint effects, // -// otherwise taint effect must equal to toleration.effect. -// -// (2) If toleration.operator is 'Exists', it means to match all taint values. -// (3) Empty toleration.key means to match all taint keys. -// -// If toleration.key is empty, toleration.operator must be 'Exists'; -// this combination means to match all taint values and all taint keys. +// 1. Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// 2. If toleration.operator is 'Exists', it means to match all taint values. +// 3. Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. func (t *Toleration) ToleratesTaint(taint *Taint) bool { if len(t.Effect) > 0 && t.Effect != taint.Effect { return false diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index bfb3b1d971..c831d5961c 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -577,7 +577,7 @@ const ( PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed" ) -// PersistentVolumeClaimCondition contails details about state of pvc +// PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` @@ -735,7 +735,7 @@ type EmptyDirVolumeSource struct { // The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` } @@ -1826,7 +1826,6 @@ type CSIPersistentVolumeSource struct { // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an beta field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional @@ -1835,9 +1834,10 @@ type CSIPersistentVolumeSource struct { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. - // This is an alpha field and requires enabling CSINodeExpandSecret feature gate. 
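The reflowed `ToleratesTaint` comment above lists the three matching rules. A minimal sketch of how they play out with the vendored types; the `dedicated` taint key and its value are made-up examples, not taken from this diff.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	taint := corev1.Taint{
		Key:    "dedicated", // hypothetical taint key, for illustration only
		Value:  "builds",
		Effect: corev1.TaintEffectNoSchedule,
	}

	// Rule 2: operator Exists matches any taint value for the same key and effect.
	tol := corev1.Toleration{
		Key:      "dedicated",
		Operator: corev1.TolerationOpExists,
		Effect:   corev1.TaintEffectNoSchedule,
	}
	fmt.Println(tol.ToleratesTaint(&taint)) // true

	// Rules 1 and 3: an empty effect matches all effects, and an empty key
	// with Exists tolerates every taint.
	wildcard := corev1.Toleration{Operator: corev1.TolerationOpExists}
	fmt.Println(wildcard.ToleratesTaint(&taint)) // true
}
```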
+ // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. + // +featureGate=CSINodeExpandSecret // +optional NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"` } @@ -2265,6 +2265,33 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// ResourceResizeRestartPolicy specifies how to handle container resource resize. +type ResourceResizeRestartPolicy string + +// These are the valid resource resize restart policy values: +const ( + // 'NotRequired' means Kubernetes will try to resize the container + // without restarting it, if possible. Kubernetes may however choose to + // restart the container if it is unable to actuate resize without a + // restart. For e.g. the runtime doesn't support restart-free resizing. + NotRequired ResourceResizeRestartPolicy = "NotRequired" + // 'RestartContainer' means Kubernetes will resize the container in-place + // by stopping and starting the container when new resources are applied. + // This is needed for legacy applications. For e.g. java apps using the + // -xmxN flag which are unable to use resized memory without restarting. + RestartContainer ResourceResizeRestartPolicy = "RestartContainer" +) + +// ContainerResizePolicy represents resource resize policy for the container. +type ContainerResizePolicy struct { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + ResourceName ResourceName `json:"resourceName" protobuf:"bytes,1,opt,name=resourceName,casttype=ResourceName"` + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + RestartPolicy ResourceResizeRestartPolicy `json:"restartPolicy" protobuf:"bytes,2,opt,name=restartPolicy,casttype=ResourceResizeRestartPolicy"` +} + // PreemptionPolicy describes a policy for if/when to preempt a pod. // +enum type PreemptionPolicy string @@ -2311,7 +2338,7 @@ type ResourceRequirements struct { Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` @@ -2414,6 +2441,11 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. 
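A minimal sketch of a container using the new `ContainerResizePolicy` type and constants defined in the hunk above. Per the `+featureGate` marker, this only has an effect on clusters with `InPlacePodVerticalScaling` enabled; the container name and image are placeholders.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	c := corev1.Container{
		Name:  "app",                                // placeholder name
		Image: "registry.example.com/app:latest",    // placeholder image
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
		},
		// CPU can be resized in place; a memory change restarts the container.
		ResizePolicy: []corev1.ContainerResizePolicy{
			{ResourceName: corev1.ResourceCPU, RestartPolicy: corev1.NotRequired},
			{ResourceName: corev1.ResourceMemory, RestartPolicy: corev1.RestartContainer},
		},
	}
	fmt.Printf("%+v\n", c.ResizePolicy)
}
```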
// +optional @@ -2518,8 +2550,6 @@ type ProbeHandler struct { TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` // GRPC specifies an action involving a GRPC port. - // This is a beta field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"` } @@ -2633,33 +2663,66 @@ type ContainerState struct { // ContainerStatus contains details for the current status of this container. type ContainerStatus struct { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. + // Each container in a pod must have a unique name across all container types. // Cannot be updated. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` - // Specifies whether the container has passed its readiness probe. + // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. Image string `json:"image" protobuf:"bytes,6,opt,name=image"` - // ImageID of the container's image. + // ImageID is the image ID of the container's image. The image ID may not + // match the image ID of the image used in the PodSpec, as it may have been + // resolved by the runtime. ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` - // Container's ID in the format '://'. + // ContainerID is the ID of the container in the format '://'. + // Where type is a container runtime identifier, returned from Version call of CRI API + // (for example "containerd"). // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` - // Specifies whether the container has passed its startup probe. 
- // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. + // Started indicates whether the container has finished its postStart lifecycle hook + // and passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered + // successful. Resets to false when the container is restarted, or if kubelet + // loses state temporarily. In both cases, startup probes will run again. + // Is always true when no startupProbe is defined and container is running and + // has passed the postStart lifecycle hook. The null value must be treated the + // same as false. // +optional Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"` + // AllocatedResources represents the compute resources allocated for this container by the + // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission + // and after successfully admitting desired pod resize. + // +featureGate=InPlacePodVerticalScaling + // +optional + AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` + // Resources represents the compute resource requests and limits that have been successfully + // enacted on the running container after it has been started or has been successfully resized. + // +featureGate=InPlacePodVerticalScaling + // +optional + Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,11,opt,name=resources"` } // PodPhase is a label for the condition of a pod at the current time. @@ -2723,6 +2786,10 @@ const ( // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination // is initiated by kubelet PodReasonTerminationByKubelet = "TerminationByKubelet" + + // PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the + // disruption was initiated by scheduler's preemption. + PodReasonPreemptionByScheduler = "PreemptionByScheduler" ) // PodCondition contains details for the current condition of this pod. @@ -2748,6 +2815,20 @@ type PodCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } +// PodResizeStatus shows status of desired resize of a pod's containers. +type PodResizeStatus string + +const ( + // Pod resources resize has been requested and will be evaluated by node. + PodResizeStatusProposed PodResizeStatus = "Proposed" + // Pod resources resize has been accepted by node and is being actuated. + PodResizeStatusInProgress PodResizeStatus = "InProgress" + // Node cannot resize the pod at this time and will keep retrying. + PodResizeStatusDeferred PodResizeStatus = "Deferred" + // Requested pod resize is not feasible and will not be re-evaluated. + PodResizeStatusInfeasible PodResizeStatus = "Infeasible" +) + // RestartPolicy describes how the container should be restarted. // Only one of the following restart policies may be specified. // If none of the following policies is specified, the default one @@ -3158,7 +3239,7 @@ type PodSpec struct { // +patchStrategy=merge EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. 
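A minimal sketch of reading the status fields touched above: `Started` (where a nil pointer must be treated as false), `AllocatedResources`, and the `PodResizeStatus` constants. It assumes a `Pod` already fetched from the API server; `AllocatedResources` and `Resize` are only populated when `InPlacePodVerticalScaling` is enabled.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// report prints the parts of a Pod's status introduced or reworded by this bump.
func report(pod *corev1.Pod) {
	for _, cs := range pod.Status.ContainerStatuses {
		// A nil Started pointer must be treated the same as false.
		started := cs.Started != nil && *cs.Started
		fmt.Printf("%s started=%v restarts=%d\n", cs.Name, started, cs.RestartCount)

		if cpu, ok := cs.AllocatedResources[corev1.ResourceCPU]; ok {
			fmt.Printf("  allocated cpu: %s\n", cpu.String())
		}
	}

	switch pod.Status.Resize {
	case corev1.PodResizeStatusProposed, corev1.PodResizeStatusInProgress:
		fmt.Println("resize pending")
	case corev1.PodResizeStatusDeferred, corev1.PodResizeStatusInfeasible:
		fmt.Println("resize not (yet) possible")
	}
}

func main() {
	// An empty Pod prints nothing; real callers would pass a Pod from the API server.
	report(&corev1.Pod{})
}
```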
+ // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3384,14 +3465,19 @@ type PodSpec struct { HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"` // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - // More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // This is a beta feature enabled by the PodSchedulingReadiness feature gate. // - // This is an alpha-level feature enabled by PodSchedulingReadiness feature gate. - // +optional // +patchMergeKey=name // +patchStrategy=merge // +listType=map // +listMapKey=name + // +featureGate=PodSchedulingReadiness + // +optional SchedulingGates []PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"` // ResourceClaims defines which ResourceClaims must be allocated // and reserved before the Pod is allowed to start. The resources @@ -3612,8 +3698,12 @@ type TopologySpreadConstraint struct { // spreading will be calculated. The keys are used to lookup values from the // incoming pod labels, those key-value labels are ANDed with labelSelector // to select the group of existing pods over which spreading will be calculated - // for the incoming pod. Keys that don't exist in the incoming pod labels will + // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). // +listType=atomic // +optional MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,8,opt,name=matchLabelKeys"` @@ -3881,6 +3971,11 @@ type EphemeralContainerCommon struct { // already allocated to the pod. // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. 
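A minimal sketch of a `PodSpec` exercising the two scheduling-related fields documented above: `schedulingGates` (the gate name is a hypothetical example) and a topology spread constraint with `matchLabelKeys`. Both are gated behind the beta feature gates named in the comments; labels and images are placeholders.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	spec := corev1.PodSpec{
		// The pod stays in SchedulingGated until every gate is removed;
		// gates can only be set at creation time.
		SchedulingGates: []corev1.PodSchedulingGate{
			{Name: "example.com/wait-for-quota"}, // hypothetical gate name
		},
		TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{
			MaxSkew:           1,
			TopologyKey:       "topology.kubernetes.io/zone",
			WhenUnsatisfiable: corev1.DoNotSchedule,
			LabelSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "builder"}, // hypothetical label
			},
			// The key is looked up on the incoming pod and ANDed with the
			// labelSelector; it must not also appear in the selector itself.
			MatchLabelKeys: []string{"pod-template-hash"},
		}},
		Containers: []corev1.Container{
			{Name: "app", Image: "registry.example.com/app:latest"}, // placeholders
		},
	}
	fmt.Println(len(spec.SchedulingGates), len(spec.TopologySpreadConstraints))
}
```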
// +optional @@ -4066,12 +4161,19 @@ type PodStatus struct { ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` // Status for any ephemeral containers that have run in this pod. // +optional EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -4210,6 +4312,7 @@ type ReplicationControllerSpec struct { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` @@ -4370,34 +4473,47 @@ const ( ServiceTypeExternalName ServiceType = "ExternalName" ) -// ServiceInternalTrafficPolicyType describes how nodes distribute service traffic they +// ServiceInternalTrafficPolicy describes how nodes distribute service traffic they // receive on the ClusterIP. // +enum -type ServiceInternalTrafficPolicyType string +type ServiceInternalTrafficPolicy string const ( // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints. - ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster" + ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster" // ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same // node as the client pod (dropping the traffic if there are no local endpoints). - ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local" + ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local" ) -// ServiceExternalTrafficPolicyType describes how nodes distribute service traffic they +// for backwards compat +// +enum +type ServiceInternalTrafficPolicyType = ServiceInternalTrafficPolicy + +// ServiceExternalTrafficPolicy describes how nodes distribute service traffic they // receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, -// and LoadBalancer IPs). +// and LoadBalancer IPs. // +enum -type ServiceExternalTrafficPolicyType string +type ServiceExternalTrafficPolicy string const ( - // ServiceExternalTrafficPolicyTypeCluster routes traffic to all endpoints. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" + // ServiceExternalTrafficPolicyCluster routes traffic to all endpoints. 
+ ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster" - // ServiceExternalTrafficPolicyTypeLocal preserves the source IP of the traffic by + // ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by // routing only to endpoints on the same node as the traffic was received on // (dropping the traffic if there are no local endpoints). - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local" +) + +// for backwards compat +// +enum +type ServiceExternalTrafficPolicyType = ServiceExternalTrafficPolicy + +const ( + ServiceExternalTrafficPolicyTypeLocal = ServiceExternalTrafficPolicyLocal + ServiceExternalTrafficPolicyTypeCluster = ServiceExternalTrafficPolicyCluster ) // These are the valid conditions of a service. @@ -4633,7 +4749,7 @@ type ServiceSpec struct { // a NodePort from within the cluster may need to take traffic policy into account // when picking a node. // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` + ExternalTrafficPolicy ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` // healthCheckNodePort specifies the healthcheck nodePort for the service. // This only applies when type is set to LoadBalancer and @@ -4730,7 +4846,7 @@ type ServiceSpec struct { // "Cluster", uses the standard behavior of routing to all endpoints evenly // (possibly modified by topology and other features). // +optional - InternalTrafficPolicy *ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` + InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` } // ServicePort contains information on service's port. @@ -4951,11 +5067,8 @@ type EndpointSubset struct { // +structType=atomic type EndpointAddress struct { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // The Hostname of this endpoint // +optional @@ -4988,10 +5101,17 @@ type EndpointPort struct { Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). 
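The hunks above rename the traffic-policy types and keep the old identifiers as aliases. A minimal sketch showing that pre-rename callers keep compiling while new code uses the shorter names; the Service itself is a throwaway value.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	svc := corev1.Service{
		Spec: corev1.ServiceSpec{
			Type: corev1.ServiceTypeLoadBalancer,
			// New name introduced by this bump.
			ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyLocal,
		},
	}

	internal := corev1.ServiceInternalTrafficPolicyCluster
	svc.Spec.InternalTrafficPolicy = &internal

	// The pre-rename constant is an alias, so older code still compiles and compares equal.
	fmt.Println(svc.Spec.ExternalTrafficPolicy == corev1.ServiceExternalTrafficPolicyTypeLocal) // true
}
```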
- // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` @@ -5210,6 +5330,10 @@ type NodeStatus struct { // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 7676749775..a01ae37173 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AWSElasticBlockStoreVolumeSource = map[string]string{ @@ -126,8 +126,8 @@ var map_CSIPersistentVolumeSource = map[string]string{ "controllerPublishSecretRef": "controllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an beta field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", - "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is an alpha field and requires enabling CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is a beta field which is enabled default by CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -346,6 +346,7 @@ var map_Container = map[string]string{ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "resizePolicy": "Resources resize policy for the container.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", @@ -388,6 +389,16 @@ func (ContainerPort) SwaggerDoc() map[string]string { return map_ContainerPort } +var map_ContainerResizePolicy = map[string]string{ + "": "ContainerResizePolicy represents resource resize policy for the container.", + "resourceName": "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.", + "restartPolicy": "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.", +} + +func (ContainerResizePolicy) SwaggerDoc() map[string]string { + return map_ContainerResizePolicy +} + var map_ContainerState = map[string]string{ "": "ContainerState holds a possible state of container. Only one of its members may be specified. 
If none of them is specified, the default one is ContainerStateWaiting.", "waiting": "Details about a waiting container", @@ -434,16 +445,18 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string { } var map_ContainerStatus = map[string]string{ - "": "ContainerStatus contains details for the current status of this container.", - "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.", - "state": "Details about the container's current condition.", - "lastState": "Details about the container's last termination condition.", - "ready": "Specifies whether the container has passed its readiness probe.", - "restartCount": "The number of times the container has been restarted.", - "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images.", - "imageID": "ImageID of the container's image.", - "containerID": "Container's ID in the format '://'.", - "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", + "": "ContainerStatus contains details for the current status of this container.", + "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", + "state": "State holds details about the container's current condition.", + "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", + "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", + "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", + "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "containerID": "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. 
The null value must be treated the same as false.", + "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -493,7 +506,7 @@ func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { var map_EmptyDirVolumeSource = map[string]string{ "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", "medium": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + "sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", } func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { @@ -502,7 +515,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { var map_EndpointAddress = map[string]string{ "": "EndpointAddress is a tuple that describes single IP address.", - "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).", "hostname": "The Hostname of this endpoint", "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", "targetRef": "Reference to object providing the endpoint.", @@ -517,7 +530,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). 
Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -609,6 +622,7 @@ var map_EphemeralContainerCommon = map[string]string{ "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + "resizePolicy": "Resources resize policy for the container.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Probes are not allowed for ephemeral containers.", @@ -1213,7 +1227,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example.", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. 
However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", @@ -1292,7 +1306,7 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimCondition = map[string]string{ - "": "PersistentVolumeClaimCondition contails details about state of pvc", + "": "PersistentVolumeClaimCondition contains details about state of pvc", "lastProbeTime": "lastProbeTime is the time we probed the condition.", "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", "reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", @@ -1668,7 +1682,7 @@ var map_PodSpec = map[string]string{ "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.", - "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. 
The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", @@ -1701,7 +1715,7 @@ var map_PodSpec = map[string]string{ "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", "hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", - "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.\n\nThis is an alpha-level feature enabled by PodSchedulingReadiness feature gate.", + "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.", "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", } @@ -1722,8 +1736,9 @@ var map_PodStatus = map[string]string{ "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "containerStatuses": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.", + "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1841,7 +1856,7 @@ var map_ProbeHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "TCPSocket specifies an action involving a TCP port.", - "grpc": "GRPC specifies an action involving a GRPC port. This is a beta field and requires enabling GRPCContainerProbe feature gate.", + "grpc": "GRPC specifies an action involving a GRPC port.", } func (ProbeHandler) SwaggerDoc() map[string]string { @@ -1954,7 +1969,7 @@ var map_ReplicationControllerSpec = map[string]string{ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", } func (ReplicationControllerSpec) SwaggerDoc() map[string]string { @@ -2040,7 +2055,7 @@ func (ResourceQuotaStatus) SwaggerDoc() map[string]string { var map_ResourceRequirements = map[string]string{ "": "ResourceRequirements describes the compute resource requirements.", "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", "claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", } @@ -2450,7 +2465,7 @@ var map_TopologySpreadConstraint = map[string]string{ "minDomains": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ", "nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. 
Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", "nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", - "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).", } func (TopologySpreadConstraint) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 2bf1c8ad64..bfb7e0bff5 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -788,6 +788,11 @@ func (in *Container) DeepCopyInto(out *Container) { } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -875,6 +880,22 @@ func (in *ContainerPort) DeepCopy() *ContainerPort { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy. +func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy { + if in == nil { + return nil + } + out := new(ContainerResizePolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ContainerState) DeepCopyInto(out *ContainerState) { *out = *in @@ -967,6 +988,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = new(bool) **out = **in } + if in.AllocatedResources != nil { + in, out := &in.AllocatedResources, &out.AllocatedResources + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } @@ -1382,6 +1415,11 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -5517,7 +5555,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.InternalTrafficPolicy != nil { in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(ServiceInternalTrafficPolicyType) + *out = new(ServiceInternalTrafficPolicy) **out = **in } return diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 9cbe46394a..b7150ef2cb 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -86,7 +86,9 @@ message EndpointConditions { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. // +optional optional bool ready = 1; @@ -115,9 +117,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -125,21 +126,28 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. 
- // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 4; @@ -183,7 +191,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index 2df80c3d5c..9b4daafca9 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -29,9 +29,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -40,10 +42,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -61,8 +65,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -77,8 +83,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -86,6 +94,7 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional @@ -104,9 +113,11 @@ type Endpoint struct { // be used to determine endpoints local to a Node. 
// +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // zone is the name of the Zone this endpoint exists in. // +optional Zone *string `json:"zone,omitempty" protobuf:"bytes,7,opt,name=zone"` + // hints contains information associated with how an endpoint should be // consumed. // +optional @@ -119,7 +130,9 @@ type EndpointConditions struct { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` @@ -154,28 +167,37 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` + // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"` @@ -186,9 +208,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
// +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index 746408b668..c780c9573d 100644 --- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ @@ -45,7 +45,7 @@ func (Endpoint) SwaggerDoc() map[string]string { var map_EndpointConditions = map[string]string{ "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.", "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.", "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.", } @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). 
Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -90,7 +90,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index 2979e64a71..8b6c360b0e 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -118,9 +118,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -128,17 +127,17 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; - // The application protocol for this port. + // appProtocol represents the application protocol for this port. 
// This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -186,7 +185,7 @@ message EndpointSliceList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index 7a02bead59..f09f7f320c 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -33,9 +33,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -44,10 +46,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -64,8 +68,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -80,8 +86,10 @@ type Endpoint struct { // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -89,10 +97,12 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels @@ -108,10 +118,12 @@ type Endpoint struct { // This field is deprecated and will be removed in future api versions. 
// +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can // be used to determine endpoints local to a Node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // hints contains information associated with how an endpoint should be // consumed. // +featureGate=TopologyAwareHints @@ -159,24 +171,26 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` - // The application protocol for this port. + + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per // RFC-6335 and https://www.iana.org/assignments/service-names). @@ -195,9 +209,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index e1c974b397..b1d4c306cc 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ @@ -64,10 +64,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. 
All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -89,7 +89,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go index 797da63bb7..44ac0c3bb6 100644 --- a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index 0e6bd5a83c..e6c28a4f8c 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 333142b3e3..863ebbc4a7 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -49,94 +49,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m 
*AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{3} + return fileDescriptor_cdc93917efc28165, []int{0} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +80,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{4} + return fileDescriptor_cdc93917efc28165, []int{1} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +108,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{5} + return fileDescriptor_cdc93917efc28165, []int{2} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +136,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{6} + return fileDescriptor_cdc93917efc28165, []int{3} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +164,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{7} + return fileDescriptor_cdc93917efc28165, []int{4} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +192,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{8} + return fileDescriptor_cdc93917efc28165, []int{5} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +220,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{9} + return fileDescriptor_cdc93917efc28165, []int{6} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +248,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{10} + return fileDescriptor_cdc93917efc28165, []int{7} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +276,7 @@ 
var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{11} + return fileDescriptor_cdc93917efc28165, []int{8} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +304,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{12} + return fileDescriptor_cdc93917efc28165, []int{9} } func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +332,7 @@ var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{13} + return fileDescriptor_cdc93917efc28165, []int{10} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +360,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{14} + return fileDescriptor_cdc93917efc28165, []int{11} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +388,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{15} + return fileDescriptor_cdc93917efc28165, []int{12} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,38 +413,10 @@ func (m *DeploymentStrategy) XXX_DiscardUnknown() { var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{16} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{17} + return fileDescriptor_cdc93917efc28165, []int{13} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -556,7 +444,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{18} + return fileDescriptor_cdc93917efc28165, []int{14} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,66 +469,10 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{19} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{20} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{21} + return fileDescriptor_cdc93917efc28165, []int{15} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +500,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{22} + return fileDescriptor_cdc93917efc28165, []int{16} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +528,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{23} + return fileDescriptor_cdc93917efc28165, []int{17} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +556,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, 
[]int{24} + return fileDescriptor_cdc93917efc28165, []int{18} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +584,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{25} + return fileDescriptor_cdc93917efc28165, []int{19} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +612,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{26} + return fileDescriptor_cdc93917efc28165, []int{20} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +640,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{27} + return fileDescriptor_cdc93917efc28165, []int{21} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +668,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{28} + return fileDescriptor_cdc93917efc28165, []int{22} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +696,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{29} + return fileDescriptor_cdc93917efc28165, []int{23} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +724,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{30} + return fileDescriptor_cdc93917efc28165, []int{24} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +752,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{31} + return fileDescriptor_cdc93917efc28165, []int{25} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +780,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{32} + return fileDescriptor_cdc93917efc28165, []int{26} } func (m *IngressTLS) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +808,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{33} + return fileDescriptor_cdc93917efc28165, []int{27} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +836,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{34} + return fileDescriptor_cdc93917efc28165, []int{28} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +864,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{35} + return fileDescriptor_cdc93917efc28165, []int{29} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +892,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{36} + return fileDescriptor_cdc93917efc28165, []int{30} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +920,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{37} + return fileDescriptor_cdc93917efc28165, []int{31} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +948,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{38} + return fileDescriptor_cdc93917efc28165, []int{32} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +976,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{39} + return fileDescriptor_cdc93917efc28165, []int{33} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1004,7 @@ var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} } func (*NetworkPolicyStatus) ProtoMessage() {} func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{40} + return fileDescriptor_cdc93917efc28165, []int{34} } func (m 
*NetworkPolicyStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1197,94 +1029,10 @@ func (m *NetworkPolicyStatus) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{41} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{42} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{43} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{44} + return fileDescriptor_cdc93917efc28165, []int{35} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1060,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return 
fileDescriptor_cdc93917efc28165, []int{45} + return fileDescriptor_cdc93917efc28165, []int{36} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1088,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{46} + return fileDescriptor_cdc93917efc28165, []int{37} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1116,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{47} + return fileDescriptor_cdc93917efc28165, []int{38} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1144,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{48} + return fileDescriptor_cdc93917efc28165, []int{39} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1172,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{49} + return fileDescriptor_cdc93917efc28165, []int{40} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1200,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{50} + return fileDescriptor_cdc93917efc28165, []int{41} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1228,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{51} + return fileDescriptor_cdc93917efc28165, []int{42} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1505,15 +1253,15 @@ func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{52} +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{43} } -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { +func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m 
*RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1521,27 +1269,27 @@ func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([ } return b[:n], nil } -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) } -func (m *RunAsGroupStrategyOptions) XXX_Size() int { +func (m *Scale) XXX_Size() int { return m.Size() } -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) } -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo +var xxx_messageInfo_Scale proto.InternalMessageInfo -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{53} +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{44} } -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -1549,126 +1297,14 @@ func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([] } return b[:n], nil } -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) } -func (m *RunAsUserStrategyOptions) XXX_Size() int { +func (m *ScaleSpec) XXX_Size() int { return m.Size() } -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{54} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = 
SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{55} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{56} -} -func (m *Scale) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scale) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scale.Merge(m, src) -} -func (m *Scale) XXX_Size() int { - return m.Size() -} -func (m *Scale) XXX_DiscardUnknown() { - xxx_messageInfo_Scale.DiscardUnknown(m) -} - -var xxx_messageInfo_Scale proto.InternalMessageInfo - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{57} -} -func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleSpec.Merge(m, src) -} -func (m *ScaleSpec) XXX_Size() int { - return m.Size() -} -func (m *ScaleSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) } var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo @@ -1676,7 +1312,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{58} + return fileDescriptor_cdc93917efc28165, []int{45} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1701,38 +1337,7 @@ func (m *ScaleStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{59} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo - func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") @@ -1747,11 +1352,8 @@ func init() { proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") @@ -1772,9 +1374,6 @@ func init() { proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyStatus") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") @@ -1783,15 +1382,10 @@ func init() { proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") - 
proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } func init() { @@ -1799,344 +1393,188 @@ func init() { } var fileDescriptor_cdc93917efc28165 = []byte{ - // 3920 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5c, 0x4b, 0x6c, 0x1c, 0xd9, - 0x75, 0x55, 0x75, 0x93, 0xec, 0xe6, 0xa5, 0xf8, 0x7b, 0xa4, 0xc8, 0x1e, 0xca, 0x62, 0xcb, 0x35, - 0xc8, 0x44, 0x33, 0xd1, 0x74, 0x5b, 0x1c, 0x49, 0x1e, 0x8f, 0x10, 0x7b, 0xd8, 0xfc, 0x48, 0xb4, - 0xf9, 0xe9, 0x79, 0x4d, 0xca, 0xc6, 0x20, 0xe3, 0xb8, 0x58, 0xfd, 0xd8, 0xac, 0x61, 0x75, 0x55, - 0xa5, 0xaa, 0x9a, 0x66, 0x07, 0x59, 0x24, 0x48, 0x36, 0x06, 0x02, 0x24, 0x1b, 0x27, 0x59, 0x66, - 0x60, 0x20, 0xbb, 0x20, 0xcb, 0x64, 0xe1, 0x18, 0x09, 0xe2, 0x00, 0x42, 0xe0, 0x04, 0x06, 0xb2, - 0x88, 0x57, 0x44, 0x86, 0x5e, 0x05, 0x59, 0x65, 0x17, 0x68, 0x15, 0xbc, 0x4f, 0xfd, 0xab, 0xd8, - 0xd5, 0x8c, 0x44, 0xc4, 0x81, 0x57, 0x62, 0xbd, 0x7b, 0xef, 0x79, 0xbf, 0xfb, 0xee, 0x3d, 0xef, - 0xd3, 0x82, 0xcd, 0x93, 0xf7, 0x9d, 0x9a, 0x66, 0xd6, 0x4f, 0x7a, 0x87, 0xc4, 0x36, 0x88, 0x4b, - 0x9c, 0xfa, 0x29, 0x31, 0xda, 0xa6, 0x5d, 0x17, 0x02, 0xc5, 0xd2, 0xea, 0xe4, 0xcc, 0x25, 0x86, - 0xa3, 0x99, 0x86, 0x53, 0x3f, 0x7d, 0x70, 0x48, 0x5c, 0xe5, 0x41, 0xbd, 0x43, 0x0c, 0x62, 0x2b, - 0x2e, 0x69, 0xd7, 0x2c, 0xdb, 0x74, 0x4d, 0x74, 0x87, 0xab, 0xd7, 0x14, 0x4b, 0xab, 0x05, 0xea, - 0x35, 0xa1, 0xbe, 0xf4, 0x6e, 0x47, 0x73, 0x8f, 0x7b, 0x87, 0x35, 0xd5, 0xec, 0xd6, 0x3b, 0x66, - 0xc7, 0xac, 0x33, 0xab, 0xc3, 0xde, 0x11, 0xfb, 0x62, 0x1f, 0xec, 0x2f, 0x8e, 0xb6, 0x24, 0x87, - 0x2a, 0x57, 0x4d, 0x9b, 0xd4, 0x4f, 0x13, 0x35, 0x2e, 0x3d, 0x0c, 0x74, 0xba, 0x8a, 0x7a, 0xac, - 0x19, 0xc4, 0xee, 0xd7, 0xad, 0x93, 0x0e, 0x2d, 0x70, 0xea, 0x5d, 0xe2, 0x2a, 0x69, 0x56, 0xf5, - 0x2c, 0x2b, 0xbb, 0x67, 0xb8, 0x5a, 0x97, 0x24, 0x0c, 0x1e, 0x0f, 0x32, 0x70, 0xd4, 0x63, 0xd2, - 0x55, 0x12, 0x76, 0xef, 0x65, 0xd9, 0xf5, 0x5c, 0x4d, 0xaf, 0x6b, 0x86, 0xeb, 0xb8, 0x76, 0xdc, - 0x48, 0x7e, 0x08, 0x33, 0xab, 0xba, 0x6e, 0x7e, 0x97, 0xb4, 0xd7, 0x5a, 0x5b, 0xeb, 0xb6, 0x76, - 0x4a, 0x6c, 0x74, 0x17, 0x46, 0x0c, 0xa5, 0x4b, 0x2a, 0xd2, 0x5d, 0xe9, 0xde, 0x78, 0xe3, 0xe6, - 0x8b, 0xf3, 0xea, 0x8d, 0x8b, 0xf3, 0xea, 0xc8, 0xae, 0xd2, 0x25, 0x98, 0x49, 0xe4, 0x27, 0x30, - 0x2b, 0xac, 0x36, 0x75, 0x72, 0xf6, 0xdc, 0xd4, 0x7b, 0x5d, 0x82, 0xde, 0x82, 0xb1, 0x36, 0x03, - 0x10, 0x86, 0x53, 0xc2, 0x70, 0x8c, 0xc3, 0x62, 0x21, 0x95, 0x1d, 0x98, 0x16, 0xc6, 0xcf, 0x4c, - 0xc7, 0x6d, 0x2a, 0xee, 0x31, 0x5a, 0x01, 0xb0, 0x14, 0xf7, 0xb8, 0x69, 0x93, 0x23, 0xed, 0x4c, - 0x98, 0x23, 0x61, 0x0e, 0x4d, 0x5f, 0x82, 0x43, 0x5a, 0xe8, 0x3e, 0x94, 0x6d, 0xa2, 0xb4, 0xf7, - 0x0c, 0xbd, 0x5f, 0x29, 0xdc, 0x95, 0xee, 0x95, 0x1b, 0x33, 0xc2, 0xa2, 0x8c, 0x45, 0x39, 0xf6, - 0x35, 0xe4, 0xef, 0x17, 0x60, 0x7c, 
0x5d, 0x21, 0x5d, 0xd3, 0x68, 0x11, 0x17, 0x7d, 0x07, 0xca, - 0x74, 0xba, 0xda, 0x8a, 0xab, 0xb0, 0xda, 0x26, 0x56, 0xbe, 0x54, 0x0b, 0xdc, 0xc9, 0x1f, 0xbd, - 0x9a, 0x75, 0xd2, 0xa1, 0x05, 0x4e, 0x8d, 0x6a, 0xd7, 0x4e, 0x1f, 0xd4, 0xf6, 0x0e, 0x3f, 0x25, - 0xaa, 0xbb, 0x43, 0x5c, 0x25, 0x68, 0x5f, 0x50, 0x86, 0x7d, 0x54, 0xb4, 0x0b, 0x23, 0x8e, 0x45, - 0x54, 0xd6, 0xb2, 0x89, 0x95, 0xfb, 0xb5, 0x4b, 0x9d, 0xb5, 0xe6, 0xb7, 0xac, 0x65, 0x11, 0x35, - 0x18, 0x71, 0xfa, 0x85, 0x19, 0x0e, 0x7a, 0x0e, 0x63, 0x8e, 0xab, 0xb8, 0x3d, 0xa7, 0x52, 0x64, - 0x88, 0xb5, 0xdc, 0x88, 0xcc, 0x2a, 0x98, 0x0c, 0xfe, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x05, 0x40, - 0xbe, 0xee, 0x9a, 0x69, 0xb4, 0x35, 0x57, 0x33, 0x0d, 0xf4, 0x01, 0x8c, 0xb8, 0x7d, 0xcb, 0x73, - 0x81, 0xb7, 0xbc, 0x06, 0xed, 0xf7, 0x2d, 0xf2, 0xf2, 0xbc, 0xba, 0x90, 0xb4, 0xa0, 0x12, 0xcc, - 0x6c, 0xd0, 0xb6, 0xdf, 0xd4, 0x02, 0xb3, 0x7e, 0x18, 0xad, 0xfa, 0xe5, 0x79, 0x35, 0x65, 0xb1, - 0xd5, 0x7c, 0xa4, 0x68, 0x03, 0xd1, 0x29, 0x20, 0x5d, 0x71, 0xdc, 0x7d, 0x5b, 0x31, 0x1c, 0x5e, - 0x93, 0xd6, 0x25, 0x62, 0x10, 0xde, 0xc9, 0x37, 0x69, 0xd4, 0xa2, 0xb1, 0x24, 0x5a, 0x81, 0xb6, - 0x13, 0x68, 0x38, 0xa5, 0x06, 0xea, 0xcd, 0x36, 0x51, 0x1c, 0xd3, 0xa8, 0x8c, 0x44, 0xbd, 0x19, - 0xb3, 0x52, 0x2c, 0xa4, 0xe8, 0x6d, 0x28, 0x75, 0x89, 0xe3, 0x28, 0x1d, 0x52, 0x19, 0x65, 0x8a, - 0xd3, 0x42, 0xb1, 0xb4, 0xc3, 0x8b, 0xb1, 0x27, 0x97, 0x7f, 0x28, 0xc1, 0xa4, 0x3f, 0x72, 0xdb, - 0x9a, 0xe3, 0xa2, 0xdf, 0x48, 0xf8, 0x61, 0x2d, 0x5f, 0x97, 0xa8, 0x35, 0xf3, 0x42, 0xdf, 0xe7, - 0xbd, 0x92, 0x90, 0x0f, 0xee, 0xc0, 0xa8, 0xe6, 0x92, 0x2e, 0x9d, 0x87, 0xe2, 0xbd, 0x89, 0x95, - 0x7b, 0x79, 0x5d, 0xa6, 0x31, 0x29, 0x40, 0x47, 0xb7, 0xa8, 0x39, 0xe6, 0x28, 0xf2, 0x9f, 0x8c, - 0x84, 0x9a, 0x4f, 0x5d, 0x13, 0x7d, 0x02, 0x65, 0x87, 0xe8, 0x44, 0x75, 0x4d, 0x5b, 0x34, 0xff, - 0xbd, 0x9c, 0xcd, 0x57, 0x0e, 0x89, 0xde, 0x12, 0xa6, 0x8d, 0x9b, 0xb4, 0xfd, 0xde, 0x17, 0xf6, - 0x21, 0xd1, 0x47, 0x50, 0x76, 0x49, 0xd7, 0xd2, 0x15, 0x97, 0x88, 0x75, 0xf4, 0x66, 0xb8, 0x0b, - 0xd4, 0x73, 0x28, 0x58, 0xd3, 0x6c, 0xef, 0x0b, 0x35, 0xb6, 0x7c, 0xfc, 0x21, 0xf1, 0x4a, 0xb1, - 0x0f, 0x83, 0x4e, 0x61, 0xaa, 0x67, 0xb5, 0xa9, 0xa6, 0x4b, 0xa3, 0x60, 0xa7, 0x2f, 0x3c, 0xe9, - 0x71, 0xde, 0xb1, 0x39, 0x88, 0x58, 0x37, 0x16, 0x44, 0x5d, 0x53, 0xd1, 0x72, 0x1c, 0xab, 0x05, - 0xad, 0xc2, 0x74, 0x57, 0x33, 0x68, 0x5c, 0xea, 0xb7, 0x88, 0x6a, 0x1a, 0x6d, 0x87, 0xb9, 0xd5, - 0x68, 0x63, 0x51, 0x00, 0x4c, 0xef, 0x44, 0xc5, 0x38, 0xae, 0x8f, 0xbe, 0x0e, 0xc8, 0xeb, 0xc6, - 0x53, 0x1e, 0xc4, 0x35, 0xd3, 0x60, 0x3e, 0x57, 0x0c, 0x9c, 0x7b, 0x3f, 0xa1, 0x81, 0x53, 0xac, - 0xd0, 0x36, 0xcc, 0xdb, 0xe4, 0x54, 0xa3, 0x7d, 0x7c, 0xa6, 0x39, 0xae, 0x69, 0xf7, 0xb7, 0xb5, - 0xae, 0xe6, 0x56, 0xc6, 0x58, 0x9b, 0x2a, 0x17, 0xe7, 0xd5, 0x79, 0x9c, 0x22, 0xc7, 0xa9, 0x56, - 0xf2, 0x9f, 0x8e, 0xc1, 0x74, 0x2c, 0xde, 0xa0, 0xe7, 0xb0, 0xa0, 0xf6, 0x6c, 0x9b, 0x18, 0xee, - 0x6e, 0xaf, 0x7b, 0x48, 0xec, 0x96, 0x7a, 0x4c, 0xda, 0x3d, 0x9d, 0xb4, 0x99, 0xa3, 0x8c, 0x36, - 0x96, 0x45, 0x8b, 0x17, 0xd6, 0x52, 0xb5, 0x70, 0x86, 0x35, 0x1d, 0x05, 0x83, 0x15, 0xed, 0x68, - 0x8e, 0xe3, 0x63, 0x16, 0x18, 0xa6, 0x3f, 0x0a, 0xbb, 0x09, 0x0d, 0x9c, 0x62, 0x45, 0xdb, 0xd8, - 0x26, 0x8e, 0x66, 0x93, 0x76, 0xbc, 0x8d, 0xc5, 0x68, 0x1b, 0xd7, 0x53, 0xb5, 0x70, 0x86, 0x35, - 0x7a, 0x04, 0x13, 0xbc, 0x36, 0x36, 0x7f, 0x62, 0xa2, 0xe7, 0x04, 0xd8, 0xc4, 0x6e, 0x20, 0xc2, - 0x61, 0x3d, 0xda, 0x35, 0xf3, 0xd0, 0x21, 0xf6, 0x29, 0x69, 0x67, 0x4f, 0xf0, 0x5e, 0x42, 0x03, - 0xa7, 0x58, 0xd1, 0xae, 0x71, 0x0f, 0x4c, 0x74, 0x6d, 0x2c, 
0xda, 0xb5, 0x83, 0x54, 0x2d, 0x9c, - 0x61, 0x4d, 0xfd, 0x98, 0x37, 0x79, 0xf5, 0x54, 0xd1, 0x74, 0xe5, 0x50, 0x27, 0x95, 0x52, 0xd4, - 0x8f, 0x77, 0xa3, 0x62, 0x1c, 0xd7, 0x47, 0x4f, 0x61, 0x96, 0x17, 0x1d, 0x18, 0x8a, 0x0f, 0x52, - 0x66, 0x20, 0x6f, 0x08, 0x90, 0xd9, 0xdd, 0xb8, 0x02, 0x4e, 0xda, 0xa0, 0x0f, 0x60, 0x4a, 0x35, - 0x75, 0x9d, 0xf9, 0xe3, 0x9a, 0xd9, 0x33, 0xdc, 0xca, 0x38, 0x43, 0x41, 0x74, 0x3d, 0xae, 0x45, - 0x24, 0x38, 0xa6, 0x89, 0x08, 0x80, 0xea, 0x25, 0x1c, 0xa7, 0x02, 0x2c, 0x3e, 0x3e, 0xc8, 0x1b, - 0x03, 0xfc, 0x54, 0x15, 0x70, 0x00, 0xbf, 0xc8, 0xc1, 0x21, 0x60, 0xf9, 0x9f, 0x24, 0x58, 0xcc, - 0x08, 0x1d, 0xe8, 0x6b, 0x91, 0x14, 0xfb, 0x6b, 0xb1, 0x14, 0x7b, 0x3b, 0xc3, 0x2c, 0x94, 0x67, - 0x0d, 0x98, 0xb4, 0x69, 0xaf, 0x8c, 0x0e, 0x57, 0x11, 0x31, 0xf2, 0xd1, 0x80, 0x6e, 0xe0, 0xb0, - 0x4d, 0x10, 0xf3, 0x67, 0x2f, 0xce, 0xab, 0x93, 0x11, 0x19, 0x8e, 0xc2, 0xcb, 0x7f, 0x56, 0x00, - 0x58, 0x27, 0x96, 0x6e, 0xf6, 0xbb, 0xc4, 0xb8, 0x0e, 0x0e, 0xb5, 0x17, 0xe1, 0x50, 0xef, 0x0e, - 0x9a, 0x1e, 0xbf, 0x69, 0x99, 0x24, 0xea, 0x9b, 0x31, 0x12, 0x55, 0xcf, 0x0f, 0x79, 0x39, 0x8b, - 0xfa, 0xb7, 0x22, 0xcc, 0x05, 0xca, 0x01, 0x8d, 0x7a, 0x12, 0x99, 0xe3, 0x5f, 0x8d, 0xcd, 0xf1, - 0x62, 0x8a, 0xc9, 0x6b, 0xe3, 0x51, 0x9f, 0xc2, 0x14, 0x65, 0x39, 0x7c, 0x2e, 0x19, 0x87, 0x1a, - 0x1b, 0x9a, 0x43, 0xf9, 0xd9, 0x6e, 0x3b, 0x82, 0x84, 0x63, 0xc8, 0x19, 0x9c, 0xad, 0xf4, 0x8b, - 0xc8, 0xd9, 0x7e, 0x24, 0xc1, 0x54, 0x30, 0x4d, 0xd7, 0x40, 0xda, 0x76, 0xa3, 0xa4, 0xed, 0xed, - 0xdc, 0x2e, 0x9a, 0xc1, 0xda, 0xfe, 0x9b, 0x12, 0x7c, 0x5f, 0x89, 0x2e, 0xf0, 0x43, 0x45, 0x3d, - 0x19, 0xbc, 0xc7, 0x43, 0xdf, 0x97, 0x00, 0x89, 0x2c, 0xb0, 0x6a, 0x18, 0xa6, 0xab, 0xf0, 0x58, - 0xc9, 0x9b, 0xb5, 0x95, 0xbb, 0x59, 0x5e, 0x8d, 0xb5, 0x83, 0x04, 0xd6, 0x86, 0xe1, 0xda, 0xfd, - 0x60, 0x92, 0x93, 0x0a, 0x38, 0xa5, 0x01, 0x48, 0x01, 0xb0, 0x05, 0xe6, 0xbe, 0x29, 0x16, 0xf2, - 0xbb, 0x39, 0x62, 0x1e, 0x35, 0x58, 0x33, 0x8d, 0x23, 0xad, 0x13, 0x84, 0x1d, 0xec, 0x03, 0xe1, - 0x10, 0xe8, 0xd2, 0x06, 0x2c, 0x66, 0xb4, 0x16, 0xcd, 0x40, 0xf1, 0x84, 0xf4, 0xf9, 0xb0, 0x61, - 0xfa, 0x27, 0x9a, 0x87, 0xd1, 0x53, 0x45, 0xef, 0xf1, 0xf0, 0x3b, 0x8e, 0xf9, 0xc7, 0x07, 0x85, - 0xf7, 0x25, 0xf9, 0x87, 0xa3, 0x61, 0xdf, 0x61, 0x8c, 0xf9, 0x1e, 0xdd, 0xb4, 0x5a, 0xba, 0xa6, - 0x2a, 0x8e, 0x20, 0x42, 0x37, 0xf9, 0x86, 0x95, 0x97, 0x61, 0x5f, 0x1a, 0xe1, 0xd6, 0x85, 0xd7, - 0xcb, 0xad, 0x8b, 0xaf, 0x86, 0x5b, 0xff, 0x26, 0x94, 0x1d, 0x8f, 0x55, 0x8f, 0x30, 0xc8, 0x07, - 0x43, 0xc4, 0x57, 0x41, 0xa8, 0xfd, 0x0a, 0x7c, 0x2a, 0xed, 0x83, 0xa6, 0x91, 0xe8, 0xd1, 0x21, - 0x49, 0xf4, 0x2b, 0x25, 0xbe, 0x34, 0xde, 0x58, 0x4a, 0xcf, 0x21, 0x6d, 0x16, 0xdb, 0xca, 0x41, - 0xbc, 0x69, 0xb2, 0x52, 0x2c, 0xa4, 0xe8, 0x93, 0x88, 0xcb, 0x96, 0xaf, 0xe2, 0xb2, 0x53, 0xd9, - 0xee, 0x8a, 0x0e, 0x60, 0xd1, 0xb2, 0xcd, 0x8e, 0x4d, 0x1c, 0x67, 0x9d, 0x28, 0x6d, 0x5d, 0x33, - 0x88, 0x37, 0x3e, 0x9c, 0x11, 0xdd, 0xbe, 0x38, 0xaf, 0x2e, 0x36, 0xd3, 0x55, 0x70, 0x96, 0xad, - 0xfc, 0x62, 0x04, 0x66, 0xe2, 0x19, 0x30, 0x83, 0xa4, 0x4a, 0x57, 0x22, 0xa9, 0xf7, 0x43, 0x8b, - 0x81, 0x33, 0xf8, 0xd0, 0x09, 0x4e, 0x62, 0x41, 0xac, 0xc2, 0xb4, 0x88, 0x06, 0x9e, 0x50, 0xd0, - 0x74, 0x7f, 0xf6, 0x0f, 0xa2, 0x62, 0x1c, 0xd7, 0x47, 0x4f, 0x60, 0xd2, 0x66, 0xbc, 0xdb, 0x03, - 0xe0, 0xdc, 0xf5, 0x96, 0x00, 0x98, 0xc4, 0x61, 0x21, 0x8e, 0xea, 0x52, 0xde, 0x1a, 0xd0, 0x51, - 0x0f, 0x60, 0x24, 0xca, 0x5b, 0x57, 0xe3, 0x0a, 0x38, 0x69, 0x83, 0x76, 0x60, 0xae, 0x67, 0x24, - 0xa1, 0xb8, 0x2b, 0xdf, 0x16, 0x50, 0x73, 0x07, 0x49, 0x15, 0x9c, 0x66, 0x87, 0x8e, 
0x22, 0x54, - 0x76, 0x8c, 0x85, 0xe7, 0x95, 0xdc, 0x0b, 0x2f, 0x37, 0x97, 0x4d, 0xa1, 0xdb, 0xe5, 0xbc, 0x74, - 0x5b, 0xfe, 0x7b, 0x29, 0x9c, 0x84, 0x7c, 0x0a, 0x3c, 0xe8, 0x94, 0x29, 0x61, 0x11, 0x62, 0x47, - 0x66, 0x3a, 0xfb, 0x7d, 0x3c, 0x14, 0xfb, 0x0d, 0x92, 0xe7, 0x60, 0xfa, 0xfb, 0x99, 0x04, 0x0b, - 0x9b, 0xad, 0xa7, 0xb6, 0xd9, 0xb3, 0xbc, 0xe6, 0xec, 0x59, 0x7c, 0x68, 0xbe, 0x0c, 0x23, 0x76, - 0x4f, 0xf7, 0xfa, 0xf1, 0xa6, 0xd7, 0x0f, 0xdc, 0xd3, 0x69, 0x3f, 0xe6, 0x62, 0x56, 0xbc, 0x13, - 0xd4, 0x00, 0xed, 0xc2, 0x98, 0xad, 0x18, 0x1d, 0xe2, 0xa5, 0xd5, 0xb7, 0x06, 0xb4, 0x7e, 0x6b, - 0x1d, 0x53, 0xf5, 0x10, 0xb1, 0x61, 0xd6, 0x58, 0xa0, 0xc8, 0xff, 0x20, 0xc1, 0xf4, 0xb3, 0xfd, - 0xfd, 0xe6, 0x96, 0xc1, 0x56, 0x34, 0x3b, 0x5b, 0xbd, 0x0b, 0x23, 0x96, 0xe2, 0x1e, 0xc7, 0x33, - 0x3d, 0x95, 0x61, 0x26, 0x41, 0x0f, 0xa1, 0x4c, 0xff, 0xa5, 0xed, 0x62, 0x4b, 0x6a, 0x9c, 0x05, - 0xc2, 0x72, 0x53, 0x94, 0xbd, 0x0c, 0xfd, 0x8d, 0x7d, 0x4d, 0xf4, 0x2d, 0x28, 0xd1, 0xf8, 0x43, - 0x8c, 0x76, 0x4e, 0x82, 0x2e, 0x1a, 0xd5, 0xe0, 0x46, 0x01, 0xe7, 0x12, 0x05, 0xd8, 0x83, 0x93, - 0x4f, 0x60, 0x3e, 0xd4, 0x09, 0x3a, 0x8a, 0xcf, 0x69, 0x4e, 0x45, 0x2d, 0x18, 0xa5, 0xb5, 0xd3, - 0xcc, 0x59, 0xcc, 0x71, 0x04, 0x1a, 0x1b, 0x88, 0x80, 0x1f, 0xd1, 0x2f, 0x07, 0x73, 0x2c, 0x79, - 0x07, 0x26, 0xd9, 0x31, 0xb4, 0x69, 0xbb, 0x6c, 0x30, 0xd1, 0x1d, 0x28, 0x76, 0x35, 0x43, 0x64, - 0xe7, 0x09, 0x61, 0x53, 0xa4, 0x99, 0x85, 0x96, 0x33, 0xb1, 0x72, 0x26, 0xe2, 0x55, 0x20, 0x56, - 0xce, 0x30, 0x2d, 0x97, 0x9f, 0x42, 0x49, 0x4c, 0x52, 0x18, 0xa8, 0x78, 0x39, 0x50, 0x31, 0x05, - 0x68, 0x0f, 0x4a, 0x5b, 0xcd, 0x86, 0x6e, 0x72, 0xae, 0xa6, 0x6a, 0x6d, 0x3b, 0x3e, 0x83, 0x6b, - 0x5b, 0xeb, 0x18, 0x33, 0x09, 0x92, 0x61, 0x8c, 0x9c, 0xa9, 0xc4, 0x72, 0x99, 0x1f, 0x8d, 0x37, - 0x80, 0xfa, 0xc6, 0x06, 0x2b, 0xc1, 0x42, 0x22, 0xff, 0x51, 0x01, 0x4a, 0x62, 0x38, 0xae, 0x61, - 0xef, 0xb6, 0x1d, 0xd9, 0xbb, 0xbd, 0x93, 0xcf, 0x35, 0x32, 0x37, 0x6e, 0xfb, 0xb1, 0x8d, 0xdb, - 0xfd, 0x9c, 0x78, 0x97, 0xef, 0xda, 0xbe, 0x57, 0x80, 0xa9, 0xa8, 0x53, 0xa2, 0x47, 0x30, 0x41, - 0xd3, 0x94, 0xa6, 0x92, 0xdd, 0x80, 0x1d, 0xfb, 0x47, 0x37, 0xad, 0x40, 0x84, 0xc3, 0x7a, 0xa8, - 0xe3, 0x9b, 0x51, 0x3f, 0x12, 0x9d, 0xce, 0x1e, 0xd2, 0x9e, 0xab, 0xe9, 0x35, 0x7e, 0x21, 0x53, - 0xdb, 0x32, 0xdc, 0x3d, 0xbb, 0xe5, 0xda, 0x9a, 0xd1, 0x49, 0x54, 0xc4, 0x9c, 0x32, 0x8c, 0x8c, - 0xbe, 0x49, 0x53, 0xa6, 0x63, 0xf6, 0x6c, 0x95, 0xa4, 0x51, 0x5f, 0x8f, 0xb6, 0xd1, 0x05, 0xda, - 0xde, 0x36, 0x55, 0x45, 0xe7, 0x93, 0x83, 0xc9, 0x11, 0xb1, 0x89, 0xa1, 0x12, 0x8f, 0x6e, 0x72, - 0x08, 0xec, 0x83, 0xc9, 0x7f, 0x23, 0xc1, 0x84, 0x18, 0x8b, 0x6b, 0xd8, 0xe4, 0x7c, 0x23, 0xba, - 0xc9, 0x79, 0x2b, 0x67, 0xe4, 0x48, 0xdf, 0xe1, 0xfc, 0xad, 0x04, 0x4b, 0x5e, 0xd3, 0x4d, 0xa5, - 0xdd, 0x50, 0x74, 0xc5, 0x50, 0x89, 0xed, 0xf9, 0xfa, 0x12, 0x14, 0x34, 0x4b, 0xcc, 0x24, 0x08, - 0x80, 0xc2, 0x56, 0x13, 0x17, 0x34, 0x8b, 0x32, 0x90, 0x63, 0xd3, 0x71, 0xd9, 0x4e, 0x88, 0x6f, - 0xb2, 0xfd, 0x56, 0x3f, 0x13, 0xe5, 0xd8, 0xd7, 0x40, 0x07, 0x30, 0x6a, 0x99, 0xb6, 0x4b, 0xb3, - 0x7e, 0x31, 0x36, 0xbf, 0x97, 0xb4, 0x9a, 0xce, 0x9b, 0x70, 0xc4, 0x20, 0x02, 0x51, 0x18, 0xcc, - 0xd1, 0xe4, 0xdf, 0x93, 0xe0, 0x8d, 0x94, 0xf6, 0x0b, 0xc2, 0xd5, 0x86, 0x92, 0xc6, 0x85, 0x22, - 0xec, 0x7d, 0x25, 0x5f, 0xb5, 0x29, 0x43, 0x11, 0x84, 0x5c, 0x2f, 0xb4, 0x7a, 0xd0, 0xf2, 0x0f, - 0x24, 0x98, 0x4d, 0xb4, 0x97, 0xa5, 0x0e, 0xea, 0xcf, 0x62, 0xa7, 0xe2, 0xa7, 0x0e, 0xea, 0x96, - 0x4c, 0x82, 0xbe, 0x01, 0x65, 0x76, 0x8f, 0xa8, 0x9a, 0xba, 0x18, 0xc0, 0xba, 0x37, 0x80, 0x4d, - 0x51, 0xfe, 
0xf2, 0xbc, 0x7a, 0x3b, 0xe5, 0x9c, 0xc2, 0x13, 0x63, 0x1f, 0x00, 0x55, 0x61, 0x94, - 0xd8, 0xb6, 0x69, 0x8b, 0x24, 0x34, 0x4e, 0x47, 0x6a, 0x83, 0x16, 0x60, 0x5e, 0x2e, 0xff, 0x45, - 0xe0, 0xa4, 0x34, 0x2b, 0xd0, 0xf6, 0xd1, 0xc9, 0x89, 0x07, 0x46, 0x3a, 0x75, 0x98, 0x49, 0x50, - 0x0f, 0x66, 0xb4, 0x58, 0x1a, 0x11, 0xab, 0xb3, 0x9e, 0x6f, 0x18, 0x7d, 0xb3, 0x46, 0x45, 0xc0, - 0xcf, 0xc4, 0x25, 0x38, 0x51, 0x85, 0x4c, 0x20, 0xa1, 0x85, 0x3e, 0x82, 0x91, 0x63, 0xd7, 0xb5, - 0x52, 0x2e, 0x4a, 0x06, 0x24, 0xaf, 0xa0, 0x09, 0x65, 0xd6, 0xbb, 0xfd, 0xfd, 0x26, 0x66, 0x50, - 0xf2, 0xdf, 0x15, 0xfc, 0xf1, 0x60, 0xbb, 0xcb, 0x0f, 0xfd, 0xde, 0xae, 0xe9, 0x8a, 0xe3, 0xb0, - 0x10, 0xc6, 0x4f, 0x42, 0xe6, 0x43, 0x0d, 0xf7, 0x65, 0x38, 0xa1, 0x8d, 0xf6, 0x83, 0xa4, 0x2e, - 0x5d, 0x25, 0xa9, 0x4f, 0xa4, 0x25, 0x74, 0xf4, 0x0c, 0x8a, 0xae, 0x9e, 0xf7, 0x44, 0x43, 0x20, - 0xee, 0x6f, 0xb7, 0x82, 0xac, 0xb8, 0xbf, 0xdd, 0xc2, 0x14, 0x02, 0xed, 0xc1, 0x28, 0x25, 0x4e, - 0x34, 0x0f, 0x14, 0xf3, 0xe7, 0x15, 0x3a, 0x82, 0xc1, 0xe2, 0xa3, 0x5f, 0x0e, 0xe6, 0x38, 0xf2, - 0xef, 0x4b, 0x30, 0x19, 0xc9, 0x16, 0xc8, 0x86, 0x9b, 0x7a, 0x68, 0xed, 0x88, 0x71, 0x78, 0x7f, - 0xf8, 0x55, 0x27, 0x16, 0xfd, 0xbc, 0xa8, 0xf7, 0x66, 0x58, 0x86, 0x23, 0x75, 0xc8, 0x0a, 0x40, - 0xd0, 0x6d, 0xba, 0x0e, 0xa8, 0xf3, 0xf2, 0x05, 0x2f, 0xd6, 0x01, 0xf5, 0x69, 0x07, 0xf3, 0x72, - 0xb4, 0x02, 0xe0, 0x10, 0xd5, 0x26, 0xee, 0x6e, 0x10, 0xb8, 0xfc, 0x74, 0xdc, 0xf2, 0x25, 0x38, - 0xa4, 0x25, 0x7f, 0x56, 0x80, 0xc9, 0x5d, 0xe2, 0x7e, 0xd7, 0xb4, 0x4f, 0x9a, 0xa6, 0xae, 0xa9, - 0xfd, 0x6b, 0x20, 0x01, 0x38, 0x42, 0x02, 0x06, 0xc5, 0xcb, 0x48, 0xeb, 0x32, 0xa9, 0xc0, 0xc7, - 0x31, 0x2a, 0xb0, 0x32, 0x14, 0xea, 0xe5, 0x84, 0xe0, 0x47, 0x12, 0x2c, 0x46, 0xf4, 0x37, 0x82, - 0x58, 0xe3, 0x07, 0x7f, 0x29, 0x57, 0xf0, 0x8f, 0xc0, 0xd0, 0x80, 0x99, 0x1e, 0xfc, 0xd1, 0x36, - 0x14, 0x5c, 0x53, 0xac, 0x8c, 0xe1, 0x30, 0x09, 0xb1, 0x83, 0x7c, 0xb6, 0x6f, 0xe2, 0x82, 0x6b, - 0xca, 0xff, 0x28, 0x41, 0x25, 0xa2, 0x15, 0x8e, 0x96, 0xaf, 0xa9, 0x07, 0x18, 0x46, 0x8e, 0x6c, - 0xb3, 0x7b, 0xe5, 0x3e, 0xf8, 0x93, 0xbc, 0x69, 0x9b, 0x5d, 0xcc, 0xb0, 0xe4, 0x1f, 0x4b, 0x30, - 0x1b, 0xd1, 0xbc, 0x06, 0x4e, 0xf2, 0x51, 0x94, 0x93, 0xdc, 0x1f, 0xa6, 0x23, 0x19, 0xcc, 0xe4, - 0xc7, 0x85, 0x58, 0x37, 0x68, 0x87, 0xd1, 0x11, 0x4c, 0x58, 0x66, 0xbb, 0xf5, 0x0a, 0x2e, 0xce, - 0xa7, 0x29, 0x57, 0x6c, 0x06, 0x58, 0x38, 0x0c, 0x8c, 0xce, 0x60, 0x96, 0xd2, 0x16, 0xc7, 0x52, - 0x54, 0xd2, 0x7a, 0x05, 0x47, 0x89, 0xb7, 0xd8, 0xcd, 0x5c, 0x1c, 0x11, 0x27, 0x2b, 0x41, 0x3b, - 0x50, 0xd2, 0x2c, 0xb6, 0x77, 0x11, 0x8b, 0x74, 0x20, 0xc1, 0xe3, 0x3b, 0x1d, 0x9e, 0x3e, 0xc4, - 0x07, 0xf6, 0x30, 0xe4, 0x7f, 0x8d, 0x7b, 0x03, 0xa3, 0xc2, 0x4f, 0x43, 0xd4, 0x43, 0xdc, 0xa1, - 0x5d, 0x8d, 0x76, 0xec, 0x0a, 0x96, 0x73, 0x55, 0xd6, 0x5e, 0x8e, 0x71, 0xa2, 0x5f, 0x81, 0x12, - 0x31, 0xda, 0x6c, 0x23, 0xc0, 0x0f, 0xa8, 0x58, 0xaf, 0x36, 0x78, 0x11, 0xf6, 0x64, 0xf2, 0x1f, - 0x14, 0x63, 0xbd, 0x62, 0x29, 0xfc, 0xd3, 0x57, 0xe6, 0x1c, 0xfe, 0x66, 0x22, 0xd3, 0x41, 0x0e, - 0x03, 0x6a, 0xc9, 0x7d, 0xfe, 0xcb, 0xc3, 0xf8, 0x7c, 0x38, 0xb7, 0x66, 0x12, 0x4b, 0xf4, 0x6d, - 0x18, 0x23, 0xbc, 0x0a, 0x9e, 0xb1, 0x1f, 0x0f, 0x53, 0x45, 0x10, 0x7e, 0x83, 0x90, 0x2d, 0xca, - 0x04, 0x2a, 0xfa, 0x1a, 0x1d, 0x2f, 0xaa, 0x4b, 0xb7, 0x3c, 0x9c, 0x99, 0x8f, 0x37, 0xee, 0xf0, - 0x6e, 0xfb, 0xc5, 0x2f, 0xcf, 0xab, 0x10, 0x7c, 0xe2, 0xb0, 0x85, 0xfc, 0xdb, 0x30, 0x97, 0x92, - 0x22, 0x90, 0x1a, 0x39, 0x55, 0xe3, 0x11, 0xb3, 0x9e, 0x6f, 0x1a, 0xf2, 0x5f, 0x0f, 0xff, 0xb3, - 0x04, 0xb3, 0x6c, 0x76, 0xd4, 0x9e, 
0xad, 0xb9, 0xfd, 0x6b, 0xcb, 0xcb, 0xcf, 0x23, 0x79, 0xf9, - 0xe1, 0x80, 0x29, 0x49, 0xb4, 0x30, 0x2b, 0x37, 0xcb, 0x3f, 0x91, 0xe0, 0x56, 0x42, 0xfb, 0x1a, - 0x42, 0xf7, 0x41, 0x34, 0x74, 0x7f, 0x69, 0xd8, 0x0e, 0x65, 0x84, 0xef, 0xff, 0x9a, 0x4d, 0xe9, - 0x0e, 0x5b, 0xa5, 0x2b, 0x00, 0x96, 0xad, 0x9d, 0x6a, 0x3a, 0xe9, 0x88, 0x17, 0x2d, 0xe5, 0xd0, - 0x7b, 0x45, 0x5f, 0x82, 0x43, 0x5a, 0xc8, 0x81, 0x85, 0x36, 0x39, 0x52, 0x7a, 0xba, 0xbb, 0xda, - 0x6e, 0xaf, 0x29, 0x96, 0x72, 0xa8, 0xe9, 0x9a, 0xab, 0x89, 0xb3, 0xbf, 0xf1, 0xc6, 0x13, 0xfe, - 0xd2, 0x24, 0x4d, 0xe3, 0xe5, 0x79, 0xf5, 0x4e, 0xda, 0x55, 0xaf, 0xa7, 0xd2, 0xc7, 0x19, 0xd0, - 0xa8, 0x0f, 0x15, 0x9b, 0xfc, 0x56, 0x4f, 0xb3, 0x49, 0x7b, 0xdd, 0x36, 0xad, 0x48, 0xb5, 0x45, - 0x56, 0xed, 0xaf, 0x5f, 0x9c, 0x57, 0x2b, 0x38, 0x43, 0x67, 0x70, 0xc5, 0x99, 0xf0, 0xe8, 0x53, - 0x98, 0x53, 0xc4, 0xcb, 0xd2, 0x70, 0xad, 0x7c, 0x85, 0xbe, 0x7f, 0x71, 0x5e, 0x9d, 0x5b, 0x4d, - 0x8a, 0x07, 0x57, 0x98, 0x06, 0x8a, 0xea, 0x50, 0x3a, 0x65, 0x8f, 0x50, 0x9d, 0xca, 0x28, 0xc3, - 0xa7, 0xb9, 0xaa, 0xc4, 0xdf, 0xa5, 0x52, 0xcc, 0xb1, 0xcd, 0x16, 0x5b, 0xf9, 0x9e, 0x16, 0x7a, - 0x04, 0x13, 0x94, 0x4a, 0x8b, 0x95, 0xcf, 0xae, 0x7f, 0xca, 0x41, 0xc4, 0x7c, 0x16, 0x88, 0x70, - 0x58, 0x0f, 0x7d, 0x02, 0xe3, 0xc7, 0xe2, 0xb0, 0xd0, 0xa9, 0x94, 0x72, 0xf1, 0x84, 0xc8, 0xe1, - 0x62, 0x63, 0x56, 0x54, 0x31, 0xee, 0x15, 0x3b, 0x38, 0x40, 0x44, 0x6f, 0x43, 0x89, 0x7d, 0x6c, - 0xad, 0xb3, 0xb3, 0xf5, 0x72, 0x10, 0x57, 0x9f, 0xf1, 0x62, 0xec, 0xc9, 0x3d, 0xd5, 0xad, 0xe6, - 0x1a, 0xbb, 0xe3, 0x89, 0xa9, 0x6e, 0x35, 0xd7, 0xb0, 0x27, 0x47, 0xdf, 0x81, 0x92, 0x43, 0xb6, - 0x35, 0xa3, 0x77, 0x56, 0x81, 0x5c, 0x2f, 0x44, 0x5a, 0x1b, 0x4c, 0x3b, 0x76, 0xca, 0x1d, 0xd4, - 0x20, 0xe4, 0xd8, 0x83, 0x45, 0xc7, 0x30, 0x6e, 0xf7, 0x8c, 0x55, 0xe7, 0xc0, 0x21, 0x76, 0x65, - 0x82, 0xd5, 0x31, 0x28, 0x95, 0x60, 0x4f, 0x3f, 0x5e, 0x8b, 0x3f, 0x42, 0xbe, 0x06, 0x0e, 0xc0, - 0xd1, 0x31, 0x00, 0xfb, 0x60, 0x07, 0xea, 0x95, 0x85, 0x5c, 0x5b, 0x33, 0xec, 0x1b, 0xc4, 0xeb, - 0xe2, 0x97, 0x6a, 0xbe, 0x18, 0x87, 0xb0, 0xd1, 0x1f, 0x4a, 0x80, 0x9c, 0x9e, 0x65, 0xe9, 0xa4, - 0x4b, 0x0c, 0x57, 0xd1, 0x59, 0xa9, 0x53, 0xb9, 0xc9, 0xaa, 0xfc, 0x70, 0xd0, 0x08, 0x26, 0x0c, - 0xe3, 0x55, 0xfb, 0x77, 0x65, 0x49, 0x55, 0x9c, 0x52, 0x2f, 0x9d, 0xc4, 0x23, 0xd1, 0xeb, 0xc9, - 0x5c, 0x93, 0x98, 0x7e, 0x55, 0x11, 0x4c, 0xa2, 0x90, 0x63, 0x0f, 0x16, 0x3d, 0x87, 0x05, 0xef, - 0xb5, 0x34, 0x36, 0x4d, 0x77, 0x53, 0xd3, 0x89, 0xd3, 0x77, 0x5c, 0xd2, 0xad, 0x4c, 0x31, 0x07, - 0xf3, 0x9f, 0x8c, 0xe1, 0x54, 0x2d, 0x9c, 0x61, 0x8d, 0xba, 0x50, 0xf5, 0x82, 0x13, 0x5d, 0xb9, - 0x7e, 0x74, 0xdc, 0x70, 0x54, 0x45, 0xe7, 0xd7, 0x87, 0xd3, 0xac, 0x82, 0x37, 0x2f, 0xce, 0xab, - 0xd5, 0xf5, 0xcb, 0x55, 0xf1, 0x20, 0x2c, 0xf4, 0x2d, 0xa8, 0x28, 0x59, 0xf5, 0xcc, 0xb0, 0x7a, - 0xbe, 0x40, 0x23, 0x5e, 0x66, 0x05, 0x99, 0xd6, 0xc8, 0x85, 0x19, 0x25, 0xfa, 0x6e, 0xdd, 0xa9, - 0xcc, 0xe6, 0xba, 0x89, 0x88, 0x3d, 0x77, 0x0f, 0x8e, 0x92, 0x62, 0x02, 0x07, 0x27, 0x6a, 0x40, - 0xbf, 0x03, 0x48, 0x89, 0x3f, 0xb5, 0x77, 0x2a, 0x28, 0x57, 0xa2, 0x4b, 0xbc, 0xd1, 0x0f, 0xdc, - 0x2e, 0x21, 0x72, 0x70, 0x4a, 0x3d, 0x74, 0x0f, 0xa1, 0xc4, 0x7e, 0x1e, 0xe0, 0x54, 0x16, 0x13, - 0x6c, 0xe8, 0x92, 0xca, 0x7d, 0xbb, 0xd0, 0x2d, 0x69, 0x1c, 0x11, 0x27, 0x2b, 0x41, 0xdb, 0x30, - 0x2f, 0x0a, 0x0f, 0x0c, 0x47, 0x39, 0x22, 0xad, 0xbe, 0xa3, 0xba, 0xba, 0x53, 0x99, 0x63, 0xf1, - 0x9d, 0xdd, 0xd4, 0xaf, 0xa6, 0xc8, 0x71, 0xaa, 0x15, 0xfa, 0x10, 0x66, 0x8e, 0x4c, 0xfb, 0x50, - 0x6b, 0xb7, 0x89, 0xe1, 0x21, 0xcd, 0x33, 0x24, 0x76, 0x32, 
0xb6, 0x19, 0x93, 0xe1, 0x84, 0x36, - 0x72, 0xe0, 0x96, 0x40, 0x6e, 0xda, 0xa6, 0xba, 0x63, 0xf6, 0x0c, 0x97, 0x53, 0xce, 0x5b, 0x7e, - 0x1a, 0xbd, 0xb5, 0x9a, 0xa6, 0xf0, 0xf2, 0xbc, 0x7a, 0x37, 0x7d, 0x23, 0x12, 0x28, 0xe1, 0x74, - 0x6c, 0x64, 0xc1, 0x4d, 0xf1, 0xa3, 0x0f, 0x76, 0x44, 0x57, 0xa9, 0xb0, 0xa5, 0xff, 0xc1, 0xe0, - 0x80, 0xe7, 0x9b, 0xc4, 0xd7, 0xff, 0xcc, 0xc5, 0x79, 0xf5, 0x66, 0x58, 0x01, 0x47, 0x6a, 0x60, - 0x8f, 0xfc, 0xc4, 0xd5, 0xf2, 0xf5, 0xfc, 0x50, 0x62, 0xb8, 0x47, 0x7e, 0x41, 0xd3, 0x5e, 0xd9, - 0x23, 0xbf, 0x10, 0xe4, 0xe5, 0xa7, 0x43, 0xff, 0x59, 0x80, 0xb9, 0x40, 0x39, 0xf7, 0x23, 0xbf, - 0x14, 0x93, 0x5f, 0xfe, 0x58, 0x22, 0xdf, 0xc3, 0xbb, 0x60, 0xe8, 0xfe, 0xef, 0x3d, 0xbc, 0x0b, - 0xda, 0x96, 0xb1, 0x7b, 0xf8, 0xab, 0x42, 0xb8, 0x03, 0x43, 0xbe, 0xfe, 0x7a, 0x05, 0xbf, 0x17, - 0xf8, 0x85, 0x7b, 0x40, 0x26, 0xff, 0xa4, 0x08, 0x33, 0xf1, 0xd5, 0x18, 0x79, 0x24, 0x24, 0x0d, - 0x7c, 0x24, 0xd4, 0x84, 0xf9, 0xa3, 0x9e, 0xae, 0xf7, 0x59, 0x1f, 0x42, 0x2f, 0x85, 0xf8, 0x75, - 0xfd, 0x17, 0x84, 0xe5, 0xfc, 0x66, 0x8a, 0x0e, 0x4e, 0xb5, 0x4c, 0xbe, 0x19, 0x1a, 0xf9, 0xdf, - 0xbe, 0x19, 0x1a, 0xbd, 0xc2, 0x9b, 0xa1, 0xf4, 0x67, 0x57, 0xc5, 0x2b, 0x3d, 0xbb, 0xba, 0xca, - 0x83, 0xa1, 0x94, 0x20, 0x36, 0xf0, 0x74, 0xe3, 0xab, 0x30, 0x15, 0x7d, 0xc4, 0xc6, 0xe7, 0x92, - 0xbf, 0xa3, 0x13, 0xcf, 0x22, 0x42, 0x73, 0xc9, 0xcb, 0xb1, 0xaf, 0x21, 0x5f, 0x48, 0xb0, 0x90, - 0xfe, 0x58, 0x1d, 0xe9, 0x30, 0xd5, 0x55, 0xce, 0xc2, 0x3f, 0x20, 0x90, 0xae, 0x78, 0x78, 0xc7, - 0x5e, 0x2f, 0xed, 0x44, 0xb0, 0x70, 0x0c, 0x1b, 0x7d, 0x0c, 0xe5, 0xae, 0x72, 0xd6, 0xea, 0xd9, - 0x1d, 0x72, 0xe5, 0x43, 0x42, 0xb6, 0x8c, 0x76, 0x04, 0x0a, 0xf6, 0xf1, 0xe4, 0x9f, 0x4b, 0xb0, - 0x98, 0xf1, 0x26, 0xe9, 0xff, 0x51, 0x2f, 0x7f, 0x20, 0xc1, 0x1b, 0x99, 0xdb, 0x30, 0xf4, 0x38, - 0xf2, 0x7c, 0x4a, 0x8e, 0x3d, 0x9f, 0x42, 0x49, 0xc3, 0xd7, 0xf4, 0x7a, 0xea, 0x33, 0x09, 0x2a, - 0x59, 0xfb, 0x52, 0xf4, 0x28, 0xd2, 0xc8, 0x2f, 0xc6, 0x1a, 0x39, 0x9b, 0xb0, 0x7b, 0x4d, 0x6d, - 0xfc, 0x17, 0x09, 0x6e, 0x5f, 0xc2, 0xef, 0xfc, 0xed, 0x0f, 0x69, 0x87, 0xb5, 0xd8, 0xa9, 0xbd, - 0xb8, 0x4e, 0x0c, 0xb6, 0x3f, 0x29, 0x3a, 0x38, 0xd3, 0x1a, 0x1d, 0xc0, 0xa2, 0xd8, 0x7b, 0xc5, - 0x65, 0x82, 0xba, 0xb0, 0x57, 0xa6, 0xeb, 0xe9, 0x2a, 0x38, 0xcb, 0x56, 0xfe, 0x4b, 0x09, 0x16, - 0xd2, 0x0f, 0x1c, 0xd0, 0x7b, 0x91, 0x21, 0xaf, 0xc6, 0x86, 0x7c, 0x3a, 0x66, 0x25, 0x06, 0xfc, - 0xdb, 0x30, 0x25, 0x8e, 0x25, 0x04, 0x8c, 0x70, 0x66, 0x39, 0x2d, 0x3b, 0x09, 0x08, 0x8f, 0x1c, - 0xb3, 0x65, 0x12, 0x2d, 0xc3, 0x31, 0x34, 0xf9, 0x7b, 0x05, 0x18, 0x6d, 0xa9, 0x8a, 0x4e, 0xae, - 0x81, 0x1b, 0x7f, 0x3d, 0xc2, 0x8d, 0x07, 0xfd, 0x7e, 0x93, 0xb5, 0x2a, 0x93, 0x16, 0xe3, 0x18, - 0x2d, 0x7e, 0x27, 0x17, 0xda, 0xe5, 0x8c, 0xf8, 0x2b, 0x30, 0xee, 0x57, 0x3a, 0x5c, 0xa2, 0x96, - 0xff, 0xbc, 0x00, 0x13, 0xa1, 0x2a, 0x86, 0x4c, 0xf3, 0x47, 0x11, 0x6e, 0x53, 0xcc, 0x71, 0x08, - 0x14, 0xaa, 0xab, 0xe6, 0xb1, 0x19, 0xfe, 0xfb, 0x83, 0xe0, 0xc5, 0x79, 0x92, 0xe4, 0x7c, 0x15, - 0xa6, 0x5c, 0xc5, 0xee, 0x10, 0xd7, 0xbf, 0x90, 0xe1, 0x4f, 0x53, 0xfc, 0x1f, 0xc2, 0xec, 0x47, - 0xa4, 0x38, 0xa6, 0xbd, 0xf4, 0x04, 0x26, 0x23, 0x95, 0x0d, 0xf5, 0xf3, 0x81, 0xbf, 0x96, 0xe0, - 0x8b, 0x03, 0x0f, 0x92, 0x50, 0x23, 0xb2, 0x48, 0x6a, 0xb1, 0x45, 0xb2, 0x9c, 0x0d, 0xf0, 0xfa, - 0x9e, 0xa1, 0x36, 0xd6, 0x5e, 0x7c, 0xbe, 0x7c, 0xe3, 0xa7, 0x9f, 0x2f, 0xdf, 0xf8, 0xd9, 0xe7, - 0xcb, 0x37, 0x7e, 0xf7, 0x62, 0x59, 0x7a, 0x71, 0xb1, 0x2c, 0xfd, 0xf4, 0x62, 0x59, 0xfa, 0xd9, - 0xc5, 0xb2, 0xf4, 0xef, 0x17, 0xcb, 0xd2, 0x1f, 0xff, 0x7c, 0xf9, 0xc6, 0xc7, 0x77, 
0x2e, 0xfd, - 0xff, 0x1e, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x08, 0x20, 0x7f, 0x0a, 0x28, 0x42, 0x00, 0x00, -} - -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + // 2890 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47, + 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76, + 0xd4, 0x5f, 0x11, 0x36, 0x61, 0x33, 0xc3, 0x6e, 0x92, 0x25, 0x3f, 0xa4, 0x84, 0x1d, 0xef, 0x26, + 0xeb, 0xc4, 0x1e, 0x4f, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9, + 0x1e, 0x75, 0xd7, 0x98, 0x35, 0x27, 0x10, 0x5c, 0x72, 0x82, 0x4b, 0x20, 0x47, 0x10, 0x12, 0x57, + 0xae, 0x1c, 0x42, 0x04, 0x22, 0x48, 0x2b, 0xc4, 0x21, 0x12, 0x07, 0x72, 0xb2, 0x88, 0x73, 0x42, + 0xfc, 0x03, 0x68, 0x4f, 0xa8, 0x7e, 0x74, 0xf5, 0x6f, 0xbb, 0xc7, 0x38, 0x16, 0x41, 0x9c, 0x3c, + 0x5d, 0xef, 0xbd, 0x4f, 0xbd, 0xaa, 0x7a, 0xf5, 0xde, 0xa7, 0xba, 0xda, 0xf0, 0xf2, 0xee, 0xb3, + 0x7e, 0xcd, 0x72, 0xeb, 0xbb, 0xc3, 0x2d, 0xe2, 0x39, 0x84, 0x12, 0xbf, 0xbe, 0x47, 0x9c, 0xae, + 0xeb, 0xd5, 0xa5, 0xc0, 0x18, 0x58, 0x75, 0x72, 0x8f, 0x12, 0xc7, 0xb7, 0x5c, 0xc7, 0xaf, 0xef, + 0x5d, 0xdf, 0x22, 0xd4, 0xb8, 0x5e, 0xef, 0x11, 0x87, 0x78, 0x06, 0x25, 0xdd, 0xda, 0xc0, 0x73, + 0xa9, 0x8b, 0x1e, 0x11, 0xea, 0x35, 0x63, 0x60, 0xd5, 0x42, 0xf5, 0x9a, 0x54, 0x5f, 0x7c, 0xb2, + 0x67, 0xd1, 0x9d, 0xe1, 0x56, 0xcd, 0x74, 0xfb, 0xf5, 0x9e, 0xdb, 0x73, 0xeb, 0xdc, 0x6a, 0x6b, + 0xb8, 0xcd, 0x9f, 0xf8, 0x03, 0xff, 0x25, 0xd0, 0x16, 0xf5, 0x48, 0xe7, 0xa6, 
0xeb, 0x91, 0xfa, + 0x5e, 0xaa, 0xc7, 0xc5, 0xa7, 0x43, 0x9d, 0xbe, 0x61, 0xee, 0x58, 0x0e, 0xf1, 0xf6, 0xeb, 0x83, + 0xdd, 0x1e, 0x6b, 0xf0, 0xeb, 0x7d, 0x42, 0x8d, 0x2c, 0xab, 0x7a, 0x9e, 0x95, 0x37, 0x74, 0xa8, + 0xd5, 0x27, 0x29, 0x83, 0x9b, 0xc7, 0x19, 0xf8, 0xe6, 0x0e, 0xe9, 0x1b, 0x29, 0xbb, 0xa7, 0xf2, + 0xec, 0x86, 0xd4, 0xb2, 0xeb, 0x96, 0x43, 0x7d, 0xea, 0x25, 0x8d, 0xf4, 0xf7, 0x4a, 0x30, 0x79, + 0xdb, 0x20, 0x7d, 0xd7, 0x69, 0x13, 0x8a, 0xbe, 0x03, 0x55, 0x36, 0x8c, 0xae, 0x41, 0x8d, 0x05, + 0xed, 0x51, 0xed, 0xea, 0xd4, 0x8d, 0xaf, 0xd6, 0xc2, 0x69, 0x56, 0xa8, 0xb5, 0xc1, 0x6e, 0x8f, + 0x35, 0xf8, 0x35, 0xa6, 0x5d, 0xdb, 0xbb, 0x5e, 0xdb, 0xd8, 0x7a, 0x87, 0x98, 0x74, 0x9d, 0x50, + 0xa3, 0x81, 0xee, 0x1f, 0x2c, 0x9f, 0x3b, 0x3c, 0x58, 0x86, 0xb0, 0x0d, 0x2b, 0x54, 0xd4, 0x84, + 0x31, 0x7f, 0x40, 0xcc, 0x85, 0x12, 0x47, 0xbf, 0x56, 0x3b, 0x72, 0x11, 0x6b, 0xca, 0xb3, 0xf6, + 0x80, 0x98, 0x8d, 0xf3, 0x12, 0x79, 0x8c, 0x3d, 0x61, 0x8e, 0x83, 0xde, 0x80, 0x71, 0x9f, 0x1a, + 0x74, 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1, + 0x8c, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83, + 0x9e, 0x87, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x8f, 0x05, 0x0e, 0x75, 0xf6, 0x07, + 0xe4, 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e, + 0xfd, 0x74, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e, + 0x20, 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0x27, 0x8a, + 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x1e, 0x83, + 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29, + 0x7a, 0x1c, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1, + 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd0, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xcd, + 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91, + 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43, + 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xa7, 0x63, 0x11, 0xf7, 0x59, + 0x68, 0xa2, 0xb7, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0x4f, 0x15, 0x74, 0xdf, + 0xd8, 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x27, 0xac, 0x20, 0xd1, 0xeb, 0x50, + 0xa5, 0xa4, 0x3f, 0xb0, 0x0d, 0x4a, 0xe4, 0x3e, 0xfa, 0xff, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0, + 0x96, 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98, + 0x19, 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c, + 0xc6, 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe, + 0xe5, 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97, + 0x00, 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0x57, 0x01, 0x05, 0xc3, 0x78, 0x45, 0x24, 0x37, + 0xcb, 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39, + 0x8f, 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2, + 0x38, 0xf7, 0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xb3, 0x71, + 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x06, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7, + 0x88, 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c, + 0x79, 
0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3, + 0xc4, 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91, + 0x6e, 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x03, 0x53, 0xa2, + 0x37, 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34, + 0x77, 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1, + 0x89, 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58, + 0xb8, 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5, + 0x38, 0xa9, 0x8f, 0x5e, 0x81, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2c, + 0x41, 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x9e, 0x87, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f, + 0x2b, 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26, + 0x22, 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55, + 0xc8, 0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x34, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0xa5, + 0x58, 0x89, 0xfd, 0x4a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36, + 0x2a, 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xe6, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f, + 0x1e, 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xbf, 0x5f, 0x02, 0xb8, 0x4d, 0x06, 0xb6, + 0xbb, 0xdf, 0x27, 0xce, 0x59, 0x70, 0xa8, 0x8d, 0x18, 0x87, 0x7a, 0xf2, 0xb8, 0xe5, 0x51, 0xae, + 0xe5, 0x92, 0xa8, 0x37, 0x13, 0x24, 0xaa, 0x5e, 0x1c, 0xf2, 0x68, 0x16, 0xf5, 0xd7, 0x32, 0x5c, + 0x0a, 0x95, 0x43, 0x1a, 0xf5, 0x42, 0x6c, 0x8d, 0xbf, 0x9c, 0x58, 0xe3, 0xf9, 0x0c, 0x93, 0xcf, + 0x8d, 0x47, 0xbd, 0x03, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5, + 0xaa, 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa8, + 0xc1, 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x78, 0xe1, 0x10, 0xcd, 0x61, + 0x6d, 0xff, 0x64, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x51, 0x18, 0x73, + 0x8c, 0x7e, 0x10, 0x99, 0x6a, 0xb3, 0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0xde, 0xd3, 0x00, 0xc9, + 0x2a, 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 0xd0, 0x63, + 0x6d, 0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32, + 0x00, 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xb2, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, + 0x6d, 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1, + 0x05, 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x44, 0x73, 0x50, 0xd9, 0x33, 0xec, 0xa1, + 0x48, 0xbf, 0x93, 0x58, 0x3c, 0x3c, 0x5f, 0x7a, 0x56, 0xd3, 0x3f, 0xa8, 0x44, 0x63, 0x87, 0x33, + 0xe6, 0xab, 0x50, 0xf5, 0xc8, 0xc0, 0xb6, 0x4c, 0xc3, 0x97, 0x44, 0x88, 0x93, 0x5f, 0x2c, 0xdb, + 0xb0, 0x92, 0xc6, 0xb8, 0x75, 0xe9, 0xf3, 0xe5, 0xd6, 0xe5, 0xd3, 0xe1, 0xd6, 0xdf, 0x86, 0xaa, + 0x1f, 0xb0, 0xea, 0x31, 0x0e, 0x79, 0x7d, 0x84, 0xfc, 0x2a, 0x09, 0xb5, 0xea, 0x40, 0x51, 0x69, + 0x05, 0x9a, 0x45, 0xa2, 0x2b, 0x23, 0x92, 0xe8, 0x53, 0x25, 0xbe, 0x2c, 0xdf, 0x0c, 0x8c, 0xa1, + 0x4f, 0xba, 0x3c, 0xb7, 0x55, 0xc3, 0x7c, 0xd3, 0xe2, 0xad, 0x58, 0x4a, 0xd1, 0xdb, 0xb1, 0x90, + 0xad, 0x9e, 0x24, 0x64, 0x67, 0xf2, 0xc3, 0x15, 0x6d, 0xc2, 0xfc, 0xc0, 0x73, 0x7b, 0x1e, 0xf1, + 0xfd, 0xdb, 0xc4, 0xe8, 0xda, 0x96, 0x43, 0x82, 0xf9, 0x11, 0x8c, 0xe8, 0xca, 0xe1, 0xc1, 0xf2, + 0x7c, 0x2b, 0x5b, 0x05, 0xe7, 
0xd9, 0xea, 0xf7, 0xc7, 0xe0, 0x42, 0xb2, 0x02, 0xe6, 0x90, 0x54, + 0xed, 0x44, 0x24, 0xf5, 0x5a, 0x64, 0x33, 0x08, 0x06, 0xaf, 0x56, 0x3f, 0x63, 0x43, 0xdc, 0x82, + 0x59, 0x99, 0x0d, 0x02, 0xa1, 0xa4, 0xe9, 0x6a, 0xf5, 0x37, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0x2f, + 0xc0, 0xb4, 0xc7, 0x79, 0x77, 0x00, 0x20, 0xb8, 0xeb, 0x43, 0x12, 0x60, 0x1a, 0x47, 0x85, 0x38, + 0xae, 0xcb, 0x78, 0x6b, 0x48, 0x47, 0x03, 0x80, 0xb1, 0x38, 0x6f, 0xbd, 0x95, 0x54, 0xc0, 0x69, + 0x1b, 0xb4, 0x0e, 0x97, 0x86, 0x4e, 0x1a, 0x4a, 0x84, 0xf2, 0x15, 0x09, 0x75, 0x69, 0x33, 0xad, + 0x82, 0xb3, 0xec, 0xd0, 0x76, 0x8c, 0xca, 0x8e, 0xf3, 0xf4, 0x7c, 0xa3, 0xf0, 0xc6, 0x2b, 0xcc, + 0x65, 0x33, 0xe8, 0x76, 0xb5, 0x28, 0xdd, 0xd6, 0x7f, 0xaf, 0x45, 0x8b, 0x90, 0xa2, 0xc0, 0xc7, + 0xbd, 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2, + 0x79, 0x3c, 0xfd, 0xfd, 0x83, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb, + 0xa0, 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0d, + 0x55, 0xf6, 0x97, 0x39, 0xce, 0xc3, 0x75, 0x92, 0x27, 0x99, 0x6a, 0x4b, 0xb6, 0x3d, 0x88, 0xfc, + 0xc6, 0x4a, 0x13, 0x7d, 0x03, 0x26, 0xd8, 0xde, 0x26, 0x4e, 0xb7, 0x20, 0xf9, 0x95, 0x4e, 0x35, + 0x84, 0x51, 0xc8, 0x67, 0x64, 0x03, 0x0e, 0xe0, 0xf4, 0x5d, 0x98, 0x8b, 0x0c, 0x02, 0x0f, 0x6d, + 0xf2, 0x06, 0xab, 0x57, 0xa8, 0x0d, 0x15, 0xd6, 0x3b, 0xab, 0x4a, 0xe5, 0x02, 0xaf, 0x17, 0x13, + 0x13, 0x11, 0x72, 0x0f, 0xf6, 0xe4, 0x63, 0x81, 0xa5, 0x6f, 0xc0, 0xc4, 0x6a, 0xab, 0x61, 0xbb, + 0x82, 0x6f, 0x98, 0x56, 0xd7, 0x4b, 0xce, 0xd4, 0xca, 0xea, 0x6d, 0x8c, 0xb9, 0x04, 0xe9, 0x30, + 0x4e, 0xee, 0x99, 0x64, 0x40, 0x39, 0xc5, 0x98, 0x6c, 0x00, 0x4b, 0xa4, 0x77, 0x78, 0x0b, 0x96, + 0x12, 0xfd, 0xc7, 0x25, 0x98, 0x90, 0xdd, 0x9e, 0xc1, 0xf9, 0x63, 0x2d, 0x76, 0xfe, 0x78, 0xa2, + 0xd8, 0x12, 0xe4, 0x1e, 0x3e, 0x3a, 0x89, 0xc3, 0xc7, 0xb5, 0x82, 0x78, 0x47, 0x9f, 0x3c, 0xde, + 0x2d, 0xc1, 0x4c, 0x7c, 0xf1, 0xd1, 0x33, 0x30, 0xc5, 0x52, 0xad, 0x65, 0x92, 0x66, 0xc8, 0xf0, + 0xd4, 0xeb, 0x87, 0x76, 0x28, 0xc2, 0x51, 0x3d, 0xd4, 0x53, 0x66, 0x2d, 0xd7, 0xa3, 0x72, 0xd0, + 0xf9, 0x53, 0x3a, 0xa4, 0x96, 0x5d, 0x13, 0x2f, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4, + 0xb3, 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x4d, 0x96, 0xf6, 0x7d, 0x77, 0xe8, + 0x99, 0x24, 0x8b, 0xbe, 0x05, 0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c, + 0x4c, 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x46, 0x83, + 0x29, 0x39, 0x17, 0x67, 0x40, 0xd4, 0x5f, 0x8b, 0x13, 0xf5, 0xc7, 0x0a, 0xee, 0xd0, 0x6c, 0x96, + 0xfe, 0x5b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4, + 0xfa, 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56, + 0x45, 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d, + 0xb4, 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e, + 0x32, 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa0, 0xc1, 0xc3, 0x19, 0xfe, 0x4b, + 0xd2, 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xae, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6, + 0xb6, 0x20, 0x85, 0x05, 0xd0, 0xfa, 0x2f, 0x35, 0xb8, 0x98, 0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf, + 0x92, 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x6b, 0x50, 0xe5, 0x77, 0x44, 0xa6, 0x6b, + 0xcb, 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03, + 0x31, 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 
0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77, + 0x58, 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc2, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99, + 0x18, 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62, + 0xd3, 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a, + 0xe8, 0x75, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0xf1, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, + 0xa3, 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2b, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0xaf, 0xab, + 0xd1, 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, + 0xb4, 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0x77, 0xa1, 0x4c, + 0xed, 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, + 0xcc, 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06, + 0xc3, 0xcd, 0xc7, 0x9e, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x6a, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83, + 0xf3, 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x76, 0xf4, 0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7, + 0x7c, 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d, + 0x2f, 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33, + 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x5f, 0x94, 0x60, 0xba, 0x49, 0xe8, + 0x77, 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, + 0xf9, 0x32, 0xe6, 0x5d, 0x2e, 0x15, 0x78, 0x2b, 0x41, 0x05, 0x6e, 0x8c, 0x84, 0x7a, 0x34, 0x21, + 0xf8, 0x50, 0x83, 0xf9, 0x98, 0xfe, 0x9d, 0x30, 0xd7, 0xa8, 0xe4, 0xaf, 0x15, 0x4a, 0xfe, 0x31, + 0x18, 0x96, 0x30, 0xb3, 0x93, 0x3f, 0x5a, 0x83, 0x12, 0x75, 0xe5, 0xce, 0x18, 0x0d, 0x93, 0x10, + 0x2f, 0xac, 0x67, 0x1d, 0x17, 0x97, 0xa8, 0xab, 0xff, 0x51, 0x83, 0x85, 0x98, 0x56, 0x34, 0x5b, + 0x7e, 0x4e, 0x23, 0xc0, 0x30, 0xb6, 0xed, 0xb9, 0xfd, 0x13, 0x8f, 0x41, 0x2d, 0xf2, 0xcb, 0x9e, + 0xdb, 0xc7, 0x1c, 0x4b, 0xff, 0x48, 0x83, 0x8b, 0x31, 0xcd, 0x33, 0xe0, 0x24, 0xaf, 0xc7, 0x39, + 0xc9, 0xb5, 0x51, 0x06, 0x92, 0xc3, 0x4c, 0x3e, 0x2a, 0x25, 0x86, 0xc1, 0x06, 0x8c, 0xb6, 0x61, + 0x6a, 0xe0, 0x76, 0xdb, 0xa7, 0x70, 0xf9, 0x3b, 0xcb, 0xb8, 0x62, 0x2b, 0xc4, 0xc2, 0x51, 0x60, + 0x74, 0x0f, 0x2e, 0x32, 0xda, 0xe2, 0x0f, 0x0c, 0x93, 0xb4, 0x4f, 0xe1, 0x75, 0xd8, 0x43, 0xfc, + 0x76, 0x29, 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc, 0xec, 0x22, 0x37, 0xe9, + 0xb1, 0x04, 0x4f, 0x9c, 0x74, 0x44, 0xf9, 0x90, 0x0f, 0x38, 0xc0, 0xd0, 0xff, 0x92, 0x8c, 0x06, + 0x4e, 0x85, 0x5f, 0x89, 0x50, 0x0f, 0x79, 0x0f, 0x74, 0x32, 0xda, 0xd1, 0x94, 0x2c, 0xe7, 0xa4, + 0xac, 0xbd, 0x9a, 0xe0, 0x44, 0x5f, 0x82, 0x09, 0xe2, 0x74, 0xf9, 0x41, 0x40, 0xbc, 0x64, 0xe1, + 0xa3, 0xba, 0x23, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x54, 0x4e, 0x8c, 0x8a, 0x97, 0xf0, 0x77, 0x4e, + 0x2d, 0x38, 0xd4, 0x61, 0x22, 0x37, 0x40, 0xb6, 0x42, 0x6a, 0x29, 0x62, 0xfe, 0x6b, 0xa3, 0xc4, + 0x7c, 0xb4, 0xb6, 0xe6, 0x12, 0x4b, 0xf4, 0x2d, 0x18, 0x27, 0xa2, 0x0b, 0x51, 0xb1, 0x6f, 0x8e, + 0xd2, 0x45, 0x98, 0x7e, 0xc3, 0x94, 0x2d, 0xdb, 0x24, 0x2a, 0x7a, 0x89, 0xcd, 0x17, 0xd3, 0x65, + 0x47, 0x1e, 0xc1, 0xcc, 0x27, 0x1b, 0x8f, 0x88, 0x61, 0xab, 0xe6, 0x07, 0x07, 0xcb, 0x10, 0x3e, + 0xe2, 0xa8, 0x85, 0xfe, 0x3d, 0xb8, 0x94, 0x51, 0x22, 0x90, 0x19, 0x7b, 0x33, 0x24, 0x32, 0x66, + 0xbd, 0xd8, 0x32, 0x14, 0xbf, 0xe2, 0x7c, 0xbf, 0x04, 0x20, 0xdf, 0x45, 0x9d, 0xcd, 0x97, 0x55, + 0xa3, 0xdd, 0x0a, 0x86, 0xae, 0x9d, 0xda, 0xad, 0x60, 0x04, 0xf2, 0xe8, 0x52, 
0xfc, 0x8f, 0x12, + 0x5c, 0x0a, 0x95, 0x0b, 0xdf, 0x0a, 0x66, 0x98, 0xfc, 0xef, 0xeb, 0xaa, 0x62, 0x37, 0x75, 0xe1, + 0xd4, 0xfd, 0xe7, 0xdd, 0xd4, 0x85, 0xbe, 0xe5, 0x54, 0xda, 0x5f, 0x97, 0xa2, 0x03, 0x18, 0xf1, + 0xba, 0xe8, 0x14, 0x3e, 0x30, 0xfa, 0xc2, 0xdd, 0x38, 0xe9, 0x7f, 0x2e, 0xc3, 0x85, 0xe4, 0x6e, + 0x8c, 0xdd, 0x2a, 0x68, 0xc7, 0xde, 0x2a, 0xb4, 0x60, 0x6e, 0x7b, 0x68, 0xdb, 0xfb, 0x7c, 0x0c, + 0x91, 0xab, 0x05, 0x71, 0x1f, 0xf1, 0x7f, 0xd2, 0x72, 0xee, 0xe5, 0x0c, 0x1d, 0x9c, 0x69, 0x99, + 0xbe, 0x64, 0x18, 0xfb, 0x77, 0x2f, 0x19, 0x2a, 0x27, 0xb8, 0x64, 0xc8, 0xbe, 0xa7, 0x29, 0x9f, + 0xe8, 0x9e, 0xe6, 0x24, 0x37, 0x0c, 0x19, 0x49, 0xec, 0xd8, 0x52, 0xf2, 0x22, 0xcc, 0xc4, 0x6f, + 0xbd, 0xc4, 0x5a, 0x8a, 0x8b, 0x37, 0x79, 0xc7, 0x14, 0x59, 0x4b, 0xd1, 0x8e, 0x95, 0x86, 0x7e, + 0xa8, 0xc1, 0xe5, 0xec, 0xaf, 0x5b, 0x90, 0x0d, 0x33, 0x7d, 0xe3, 0x5e, 0xf4, 0x8b, 0x23, 0xed, + 0x84, 0x4c, 0x89, 0x5f, 0x77, 0xac, 0xc7, 0xb0, 0x70, 0x02, 0x1b, 0xbd, 0x05, 0xd5, 0xbe, 0x71, + 0xaf, 0x3d, 0xf4, 0x7a, 0xe4, 0xc4, 0x8c, 0x8c, 0x6f, 0xa3, 0x75, 0x89, 0x82, 0x15, 0x9e, 0xfe, + 0x99, 0x06, 0xf3, 0x39, 0x97, 0x18, 0xff, 0x45, 0xa3, 0x7c, 0xb7, 0x04, 0x95, 0xb6, 0x69, 0xd8, + 0xe4, 0x0c, 0x08, 0xc5, 0xab, 0x31, 0x42, 0x71, 0xdc, 0x57, 0xb2, 0xdc, 0xab, 0x5c, 0x2e, 0x81, + 0x13, 0x5c, 0xe2, 0x89, 0x42, 0x68, 0x47, 0xd3, 0x88, 0xe7, 0x60, 0x52, 0x75, 0x3a, 0x5a, 0x76, + 0xd3, 0x7f, 0x5e, 0x82, 0xa9, 0x48, 0x17, 0x23, 0xe6, 0xc6, 0xed, 0x58, 0x41, 0x28, 0x17, 0x78, + 0x83, 0x14, 0xe9, 0xab, 0x16, 0x94, 0x00, 0xf1, 0x95, 0x47, 0x78, 0xaf, 0x9f, 0xae, 0x0c, 0x2f, + 0xc2, 0x0c, 0x35, 0xbc, 0x1e, 0xa1, 0xea, 0xc8, 0x20, 0x5e, 0x9e, 0xaa, 0xcf, 0x8d, 0x3a, 0x31, + 0x29, 0x4e, 0x68, 0x2f, 0xbe, 0x00, 0xd3, 0xb1, 0xce, 0x46, 0xf9, 0x48, 0xa3, 0xb1, 0x72, 0xff, + 0xd3, 0xa5, 0x73, 0x1f, 0x7f, 0xba, 0x74, 0xee, 0x93, 0x4f, 0x97, 0xce, 0x7d, 0xff, 0x70, 0x49, + 0xbb, 0x7f, 0xb8, 0xa4, 0x7d, 0x7c, 0xb8, 0xa4, 0x7d, 0x72, 0xb8, 0xa4, 0xfd, 0xed, 0x70, 0x49, + 0xfb, 0xc9, 0x67, 0x4b, 0xe7, 0xde, 0x7a, 0xe4, 0xc8, 0xff, 0xd9, 0xf8, 0x57, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x39, 0x36, 0x95, 0x55, 0xec, 0x31, 0x00, 0x00, } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { @@ -2882,48 +2320,6 @@ func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3006,7 +2402,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HostPortRange) Marshal() (dAtA []byte, 
err error) { +func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3016,26 +2412,34 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + if len(m.Except) > 0 { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IDRange) Marshal() (dAtA []byte, err error) { +func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3045,100 +2449,34 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *IPBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Except) > 0 { - for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Except[iNdEx]) - copy(dAtA[i:], m.Except[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.CIDR) - copy(dAtA[i:], m.CIDR) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Ingress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 { @@ -4001,7 +3339,7 @@ func (m *NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4011,16 +3349,26 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -4044,7 +3392,60 @@ func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4054,12 +3455,12 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func 
(m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4091,7 +3492,7 @@ func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4101,49 +3502,32 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RunAsGroup != nil { + i-- + dAtA[i] = 0x1a + if m.Selector != nil { { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4151,63 +3535,40 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } + dAtA[i] = 0x12 } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i 
= encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4215,167 +3576,148 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x32 } - i-- - dAtA[i] = 0x78 } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) i-- - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) i-- - dAtA[i] = 0x52 + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) i-- - dAtA[i] = 0x48 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - 
i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - i-- - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.RequiredDropCapabilities) > 0 { - for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - 
dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { +func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4385,12 +3727,12 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4428,7 +3770,7 @@ func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4438,50 +3780,23 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4491,687 +3806,610 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := 
range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return base +} +func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + 
sovGenerated(uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return n } -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil + return n } -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + return n } -func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return n } -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n } -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n } -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], 
nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } - return len(dAtA) - i, nil + return n } -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Scale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *ScaleSpec) Marshal() 
(dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressList) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressLoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i -= len(m.TargetSelector) - copy(dAtA[i:], m.TargetSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i-- - dAtA[i] = 0x1a - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressLoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllowedCSIDriver) Size() (n int) { +func (m *IngressPortStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *AllowedFlexVolume) Size() (n int) { +func (m *IngressRule) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Driver) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *AllowedHostPath) Size() (n int) { +func (m *IngressRuleValue) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DaemonSet) Size() (n int) { +func (m *IngressSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.IngressClassName != nil { + l = len(*m.IngressClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DaemonSetCondition) Size() (n int) { +func (m *IngressStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) + l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) + return n +} + +func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() + return n +} + +func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DaemonSetList) Size() (n int) { +func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 
+ l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { if m == nil { return 0 } @@ -5188,70 +4426,92 @@ func (m *DaemonSetList) Size() (n int) { return n } -func (m *DaemonSetSpec) Size() (n int) { +func (m *NetworkPolicyPeer) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Selector != nil { - l = m.Selector.Size() + if m.PodSelector != nil { + l = m.PodSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - n += 1 + sovGenerated(uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *DaemonSetStatus) Size() (n int) { +func (m *NetworkPolicyPort) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberReady)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberAvailable)) - n += 1 + sovGenerated(uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EndPort != nil { + n += 1 + sovGenerated(uint64(*m.EndPort)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *DaemonSetUpdateStrategy) Size() (n int) { +func (m *NetworkPolicyStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *Deployment) Size() (n int) { +func (m *ReplicaSet) Size() (n int) { if m == nil { return 0 } @@ -5266,7 +4526,7 @@ func (m *Deployment) Size() (n int) { return n } -func (m *DeploymentCondition) Size() (n int) { +func (m *ReplicaSetCondition) Size() (n int) { if m == nil { return 0 } @@ -5276,18 +4536,16 @@ func (m *DeploymentCondition) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Status) n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeploymentList) Size() (n int) { +func (m *ReplicaSetList) Size() (n int) { if m == nil { return 0 } @@ -5304,28 +4562,7 @@ func (m *DeploymentList) Size() (n int) { return n } -func (m *DeploymentRollback) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UpdatedAnnotations) > 0 { - for k, v := range m.UpdatedAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentSpec) Size() (n int) { +func (m *ReplicaSetSpec) Size() (n int) { if m == nil { return 0 } @@ -5340,151 +4577,75 @@ func (m *DeploymentSpec) Size() (n int) { } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.RollbackTo != nil { - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) - } return n } -func (m *DeploymentStatus) Size() (n int) { +func (m *ReplicaSetStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) if len(m.Conditions) > 0 { for _, e := range m.Conditions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } return n } -func (m *DeploymentStrategy) Size() (n int) { +func (m *RollbackConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Revision)) return n } -func (m *FSGroupStrategyOptions) Size() (n int) { +func (m *RollingUpdateDaemonSet) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *HTTPIngressRuleValue) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) return n } -func (m *IDRange) Size() (n int) { +func (m *RollingUpdateDeployment) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IPBlock) Size() (n int) { - if m == nil { - return 0 + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - l = len(m.CIDR) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Except) > 0 { - for _, s := range m.Except { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Ingress) Size() (n int) { +func (m *Scale) Size() (n int) { if m == nil { return 0 } @@ -5499,4006 +4660,729 @@ func (m *Ingress) Size() (n int) { return n } -func (m *IngressBackend) Size() (n int) { +func (m *ScaleSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + n += 1 + sovGenerated(uint64(m.Replicas)) return n } -func (m *IngressList) Size() (n int) { +func (m *ScaleStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - return n -} - -func (m *IngressLoadBalancerIngress) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Hostname) + l = len(m.TargetSelector) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *IngressLoadBalancerStatus) Size() (n int) { - if m == nil { - return 0 +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *IngressPortStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - if m.Error != nil { - l = len(*m.Error) - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *IngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s } - -func (m *IngressRuleValue) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", 
this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *IngressSpec) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *IngressStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *IngressTLS) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentList) String() string { + if this == nil { + return "nil" } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NetworkPolicy) Size() (n int) { - if m == nil { - return 0 + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyEgressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) } - if len(m.To) > 0 { - for _, e := range m.To { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) } - return n + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyIngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" } - if len(m.From) > 0 { - for _, e := range m.From { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", 
this.UnavailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPeer) Size() (n int) { - if m == nil { - return 0 +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.PodSelector != nil { - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," } - if m.IPBlock != nil { - l = m.IPBlock.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPaths += "}" + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + repeatedStringForPaths + `,`, + `}`, + }, "") + return s +} +func (this *IPBlock) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPort) Size() (n int) { - if m == nil { - return 0 +func (this *Ingress) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Protocol != nil { - l = len(*m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EndPort != nil { - n += 1 + sovGenerated(uint64(*m.EndPort)) + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), 
"IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicySpec) Size() (n int) { - if m == nil { - return 0 +func (this *IngressList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," } - if len(m.Egress) > 0 { - for _, e := range m.Egress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," } - return n + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyStatus) Size() (n int) { - if m == nil { - return 0 +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," } - return n + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRule) String() string { + if this == nil { + return "nil" } - 
return n + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" } - if m.AllowPrivilegeEscalation != nil { - n += 3 + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), 
"ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ReplicaSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForTo += "}" + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetCondition) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetList) Size() (n int) { - if m == nil { - return 0 + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } 
- return n + repeatedStringForFrom += "}" + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `}`, + }, "") + return s } - -func (m *RollbackConfig) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Revision)) - return n + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, + `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, + `}`, + }, "") + return s } - -func (m *RollingUpdateDaemonSet) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", 
"NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," } - return n + repeatedStringForEgress += "}" + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, + `}`, + }, "") + return s } - -func (m *RollingUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *NetworkPolicyStatus) String() string { + if this == nil { + return "nil" } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NetworkPolicyStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - return n -} - -func (m *Scale) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.TargetSelector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]DaemonSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := 
strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]DaemonSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&DaemonSetStatus{`, - `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, - `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, - `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, - `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, - `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, - `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Deployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Deployment{" + repeatedStringForItems := "[]ReplicaSet{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentRollback) String() string { - if this == nil { - return "nil" - } - keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) - for k := range this.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - mapStringForUpdatedAnnotations := "map[string]string{" - for _, k := range keysForUpdatedAnnotations { - mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) - } - mapStringForUpdatedAnnotations += "}" - s := strings.Join([]string{`&DeploymentRollback{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, - `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, - `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&DeploymentStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `UpdatedReplicas:` + 
fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" - } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," - } - repeatedStringForPaths += "}" - s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + repeatedStringForPaths + `,`, - `}`, - }, "") - return s -} -func (this *HostPortRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IDRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IPBlock) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IPBlock{`, - `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, - `Except:` + fmt.Sprintf("%v", this.Except) + `,`, - `}`, - }, "") - return s -} -func (this *Ingress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressBackend) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&IngressBackend{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Ingress{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *IngressLoadBalancerIngress) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]IngressPortStatus{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - s := strings.Join([]string{`&IngressLoadBalancerIngress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Ports:` + repeatedStringForPorts + `,`, - `}`, - }, "") - return s -} -func (this *IngressLoadBalancerStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForIngress := "[]IngressLoadBalancerIngress{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - s := strings.Join([]string{`&IngressLoadBalancerStatus{`, - `Ingress:` + repeatedStringForIngress + `,`, - `}`, - }, "") - return s -} -func (this *IngressPortStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressPortStatus{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `Error:` + valueToStringGenerated(this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRule{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForTLS := "[]IngressTLS{" - for _, f := range this.TLS { - repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," - } - repeatedStringForTLS += "}" - repeatedStringForRules := "[]IngressRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) 
+ "," - } - repeatedStringForRules += "}" - s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + repeatedStringForTLS + `,`, - `Rules:` + repeatedStringForRules + `,`, - `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, - `}`, - }, "") - return s -} -func (this *IngressStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressTLS) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyEgressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForTo := "[]NetworkPolicyPeer{" - for _, f := range this.To { - repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForTo += "}" - s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `To:` + repeatedStringForTo + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyIngressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForFrom := "[]NetworkPolicyPeer{" - for _, f := range this.From { - repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForFrom += "}" - s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `From:` + repeatedStringForFrom + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]NetworkPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", 
"v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPeer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPort{`, - `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, - `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForIngress := "[]NetworkPolicyIngressRule{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - repeatedStringForEgress := "[]NetworkPolicyEgressRule{" - for _, f := range this.Egress { - repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForEgress += "}" - s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + repeatedStringForIngress + `,`, - `Egress:` + repeatedStringForEgress + `,`, - `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&NetworkPolicyStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicySpec) 
String() string { - if this == nil { - return "nil" - } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" - s := strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, - `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, - `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, - `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSet) String() string { - if 
this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ReplicaSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]ReplicaSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *RollbackConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollbackConfig{`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", 
this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleStatus) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) 
+ `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - 
switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) - } - m.TemplateGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TemplateGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType 
== 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberMisscheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) - } - m.NumberReady = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberReady |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) - } - m.UpdatedNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) - } - m.NumberAvailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberAvailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) - } - m.NumberUnavailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberUnavailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong 
wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DaemonSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDaemonSet{} - } - if err := 
m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error 
{ - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return 
io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.UpdatedAnnotations[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated 
- } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressDeadlineSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" } - return nil + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } -func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", 
this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9521,55 +5405,17 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - m.UpdatedReplicas = 0 + var msglen int for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9579,52 +5425,28 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.UnavailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - case 6: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9651,16 +5473,15 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.ReadyReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9670,31 +5491,25 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.CollisionCount = &v + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9716,7 +5531,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9739,10 +5554,10 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9775,11 +5590,43 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9806,13 +5653,74 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9834,7 +5742,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } return nil } -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9857,17 +5765,17 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9877,27 +5785,28 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9924,8 +5833,8 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9950,7 +5859,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9973,17 +5882,17 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } - var 
stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9993,27 +5902,31 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10040,96 +5953,13 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10156,66 +5986,34 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := 
m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) } - m.Min = 0 + m.TemplateGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10225,16 +6023,16 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int32(b&0x7F) << shift + m.TemplateGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 2: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) } - m.Max = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10244,11 +6042,12 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int32(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } + m.RevisionHistoryLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10270,7 +6069,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } return nil } -func (m *IDRange) Unmarshal(dAtA []byte) error { +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10293,17 +6092,17 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) } - m.Min = 0 + m.CurrentNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10313,16 +6112,16 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int64(b&0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) } - m.Max = 0 + m.NumberMisscheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10332,66 +6131,73 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int64(b&0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberReady |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) } - var stringLen uint64 + m.UpdatedNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { 
return ErrIntOverflowGenerated @@ -10401,29 +6207,74 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) } - m.CIDR = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10433,23 +6284,25 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10472,7 +6325,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10495,17 +6348,17 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", 
fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10515,28 +6368,27 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10563,40 +6415,10 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10621,7 +6443,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (m *Deployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10644,17 +6466,17 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10664,27 +6486,28 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10711,13 +6534,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10744,10 +6567,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10772,7 +6592,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10795,17 +6615,17 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10815,30 +6635,29 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var msglen int + var stringLen 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10848,79 +6667,27 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10948,11 +6715,11 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10980,11 +6747,11 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11001,74 +6768,23 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, IngressPortStatus{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if msglen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11095,8 +6811,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11121,7 +6836,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { +func (m *DeploymentList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11144,36 +6859,17 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11183,29 +6879,30 @@ func (m 
*IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11215,24 +6912,25 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Error = &s + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11255,7 +6953,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11278,15 +6976,15 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11314,11 +7012,11 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11345,63 +7043,107 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11428,10 +7170,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = 
&HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11456,7 +7195,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11479,15 +7218,35 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11514,16 +7273,16 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11550,14 +7309,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11584,16 +7342,74 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds 
|= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11603,25 +7419,48 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11643,7 +7482,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11666,15 +7505,110 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11701,10 +7635,50 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11726,7 +7700,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11749,15 +7723,15 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group 
for non-group") + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11785,13 +7759,13 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11801,23 +7775,27 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11840,7 +7818,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11863,17 +7841,17 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11883,28 +7861,27 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + 
m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11931,15 +7908,15 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11949,24 +7926,24 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s iNdEx = postIndex default: iNdEx = preIndex @@ -11989,7 +7966,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12012,49 +7989,15 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12081,8 +8024,8 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.To = append(m.To, NetworkPolicyPeer{}) - 
if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12107,7 +8050,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { +func (m *IPBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12130,17 +8073,17 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12150,31 +8093,29 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12184,25 +8125,23 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -12225,7 +8164,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { +func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12248,15 +8187,15 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
NetworkPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12283,13 +8222,13 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12316,8 +8255,40 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12342,7 +8313,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { +func (m *IngressBackend) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12365,17 +8336,17 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12385,31 +8356,27 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen 
< 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodSelector == nil { - m.PodSelector = &v1.LabelSelector{} - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ServiceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12436,16 +8403,13 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12472,10 +8436,10 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.IPBlock == nil { - m.IPBlock = &IPBlock{} + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} } - if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12500,7 +8464,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12523,17 +8487,17 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12543,28 +8507,28 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) - m.Protocol = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12591,33 +8555,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Port == nil { - m.Port = &intstr.IntOrString{} - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EndPort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12639,7 +8581,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12662,17 +8604,17 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12682,30 +8624,29 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12715,29 +8656,27 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 
{ return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12764,16 +8703,66 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) - if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12783,23 +8772,25 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12822,7 +8813,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12845,17 +8836,36 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 
0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12865,25 +8875,56 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -12906,7 +8947,7 @@ func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { +func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12929,17 +8970,17 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var 
msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12949,28 +8990,27 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12997,7 +9037,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13022,7 +9062,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13045,15 +9085,15 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13080,41 +9120,10 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13139,7 +9148,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } return 
nil } -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13162,37 +9171,17 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13202,29 +9191,33 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13234,29 +9227,31 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + var msglen int for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13266,27 +9261,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13314,31 +9311,62 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.HostNetwork = bool(v != 0) - case 7: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13365,16 +9393,65 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13384,37 +9461,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.HostIPC = bool(v != 0) - case 10: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13424,28 +9493,77 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 11: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13472,13 +9590,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13505,13 +9623,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 13: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13538,75 +9656,63 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { 
+ preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13633,14 +9739,14 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13667,80 +9773,66 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 21: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13750,27 +9842,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 22: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13797,16 +9891,64 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13833,14 +9975,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13867,10 +10008,8 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13895,7 +10034,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSet) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13918,15 +10057,15 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13953,13 +10092,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13986,13 +10128,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14019,7 +10164,10 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14044,7 +10192,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14067,15 +10215,15 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14103,13 +10251,120 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EndPort = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14119,27 +10374,28 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14166,15 +10422,16 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14184,27 +10441,29 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14232,7 +10491,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -14255,7 +10514,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14278,48 +10537,15 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14346,8 +10572,8 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ReplicaSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14372,7 +10598,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14395,17 +10621,17 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSet: wiretype end group 
for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14415,15 +10641,28 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14450,16 +10689,13 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14486,29 +10722,10 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14530,7 +10747,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14553,17 +10770,17 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Replicas = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14573,35 +10790,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.ObservedGeneration = 0 + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14611,35 +10822,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } - m.AvailableReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14649,16 +10854,30 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 6: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14668,81 +10887,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ReplicaSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollbackConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - m.Revision = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14752,11 +10919,24 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14778,7 +10958,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14801,15 +10981,15 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14836,16 +11016,13 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14872,10 +11049,8 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14900,7 +11075,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14923,15 +11098,35 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14958,16 +11153,16 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14994,13 +11189,29 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15022,7 +11233,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15045,17 +11256,74 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } - var stringLen uint64 + m.ReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15065,27 +11333,33 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break 
} } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15112,8 +11386,8 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15138,7 +11412,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15161,49 +11435,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } - var msglen int + m.Revision = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15213,26 +11455,11 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } 
- postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -15254,7 +11481,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15277,17 +11504,17 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15297,29 +11524,33 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15329,24 +11560,27 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15369,7 +11603,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } 
return nil } -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15392,17 +11626,17 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15412,27 +11646,31 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15459,10 +11697,10 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15933,122 +12171,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 0509bc3d67..3ab6a093b5 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -30,37 +30,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/extensions/v1beta1"; -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} - // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for // more information. // DaemonSet represents the configuration of a daemon set. 
@@ -398,19 +367,6 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { @@ -453,27 +409,6 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; -} - // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs @@ -875,164 +810,6 @@ message NetworkPolicyStatus { repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. 
You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is an allowlist of Flexvolumes. 
Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // more information. // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -1227,57 +1004,6 @@ message RollingUpdateDeployment { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. 
Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - // represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. @@ -1305,7 +1031,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map selector = 2; @@ -1320,16 +1046,3 @@ message ScaleStatus { optional string targetSelector = 3; } -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. 
- // +optional - repeated IDRange ranges = 2; -} - diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index c69eff0bc4..d58908edc0 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -54,8 +54,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &ReplicaSet{}, &ReplicaSetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, ) diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index be1b95e62c..c0ac6fa25d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -35,7 +35,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -1021,389 +1021,6 @@ type ReplicaSetCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicy - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. 
These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. 
- // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. 
-type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// FSType gives strong typing to different file systems that are used by volumes. -// Deprecated: use FSType from policy API Group instead. -type FSType string - -const ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - CSI FSType = "csi" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -// Deprecated: use SELinuxStrategy from policy API Group instead. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. 
- // Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - // Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -// Deprecated: use RunAsUserStrategy from policy API Group instead. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - // Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - // Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - // Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -// Deprecated: use RunAsGroupStrategy from policy API Group instead. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. 
- RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -// Deprecated: use FSGroupStrategyType from policy API Group instead. -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - // Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - // Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead. 
- SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicyList - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 302eb95382..39aaf48537 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -24,37 +24,9 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. 
Deprecated: use AllowedFlexVolume from policy API Group instead.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -220,16 +192,6 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { return map_DeploymentStrategy } -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use FSGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", @@ -250,26 +212,6 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined. Deprecated: use HostPortRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs. 
Deprecated: use IDRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - var map_IPBlock = map[string]string{ "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", @@ -476,58 +418,6 @@ func (NetworkPolicyStatus) SwaggerDoc() map[string]string { return map_NetworkPolicyStatus } -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -617,46 +507,6 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. 
Deprecated: use SELinuxStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", @@ -680,7 +530,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -688,14 +538,4 @@ func (ScaleStatus) SwaggerDoc() map[string]string { return map_ScaleStatus } -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 671aa2d9dc..b6e9272992 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -28,54 +28,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { *out = *in @@ -435,27 +387,6 @@ func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in @@ -501,38 +432,6 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. -func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -1062,161 +961,6 @@ func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { *out = *in @@ -1413,95 +1157,6 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. -func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Scale) DeepCopyInto(out *Scale) { *out = *in @@ -1568,24 +1223,3 @@ func (in *ScaleStatus) DeepCopy() *ScaleStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. 
-func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(SupplementalGroupsStrategyOptions)
-	in.DeepCopyInto(out)
-	return out
-}
diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go
index 963aaffba3..5c93542282 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -235,54 +235,6 @@ func (in *NetworkPolicyList) APILifecycleRemoved() (major, minor int) {
 	return 1, 16
 }
 
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) {
-	return 1, 2
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) {
-	return 1, 11
-}
-
-// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
-// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
-func (in *PodSecurityPolicy) APILifecycleReplacement() schema.GroupVersionKind {
-	return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"}
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) {
-	return 1, 16
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) {
-	return 1, 2
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) {
-	return 1, 11
-}
-
-// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
-// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
-func (in *PodSecurityPolicyList) APILifecycleReplacement() schema.GroupVersionKind {
-	return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicyList"}
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) {
-	return 1, 16
-}
-
 // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
 // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
 func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) {
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
index ac6f7179a0..c95999fa5e 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
@@ -24,7 +24,7 @@ package v1alpha1
 // they are on one line! For multiple line or blocks that you want to ignore use ---.
 // Any context after a --- is ignored.
 //
-// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+// Those methods can be generated by using hack/update-codegen.sh
 
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
 var map_FlowDistinguisherMethod = map[string]string{
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
index fe4f8022a6..fc08e128db 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
@@ -24,7 +24,7 @@ package v1beta1
 // they are on one line! For multiple line or blocks that you want to ignore use ---.
 // Any context after a --- is ignored.
 //
-// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+// Those methods can be generated by using hack/update-codegen.sh
 
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
 var map_FlowDistinguisherMethod = map[string]string{
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go
index 4bedcce3ed..b2eff7f96e 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go
@@ -24,7 +24,7 @@ package v1beta2
 // they are on one line! For multiple line or blocks that you want to ignore use ---.
 // Any context after a --- is ignored.
 //
-// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+// Those methods can be generated by using hack/update-codegen.sh
 
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go index e2bd27e8c5..728252c0cf 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta3 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_FlowDistinguisherMethod = map[string]string{ diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go index 8d51e77a08..dadf95e1d5 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ImageReview = map[string]string{ diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 8196a14b96..ed194a89d5 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -56,7 +56,7 @@ message HTTPIngressPath { // Implementations are required to support all path types. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -67,7 +67,7 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic repeated HTTPIngressPath paths = 1; } @@ -76,13 +76,13 @@ message HTTPIngressRuleValue { // to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs // that should not be included within this rule. message IPBlock { - // CIDR is a string representing the IP Block + // cidr is a string representing the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" optional string cidr = 1; - // Except is a slice of CIDRs that should not be included within an IP Block + // except is a slice of CIDRs that should not be included within an IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" - // Except values will be rejected if they are outside the CIDR range + // Except values will be rejected if they are outside the cidr range // +optional repeated string except = 2; } @@ -97,12 +97,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -110,12 +110,12 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional optional IngressServiceBackend service = 4; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -134,7 +134,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -146,31 +146,31 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. 
// This may be set to "Cluster" (default) or "Namespace". // +optional optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -179,15 +179,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -201,21 +201,21 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. message IngressLoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional optional string ip = 1; - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional optional string hostname = 2; - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional repeated IngressPortStatus ports = 4; @@ -223,21 +223,21 @@ message IngressLoadBalancerIngress { // IngressLoadBalancerStatus represents the status of a load-balancer. message IngressLoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional repeated IngressLoadBalancerIngress ingress = 1; } // IngressPortStatus represents the error condition of a service port message IngressPortStatus { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. optional int32 port = 1; - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" optional string protocol = 2; - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -256,7 +256,7 @@ message IngressPortStatus { // the related backend services. 
Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -269,14 +269,14 @@ message IngressRule { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional optional string host = 1; @@ -301,18 +301,18 @@ message IngressRuleValue { // IngressServiceBackend references a Kubernetes Service as a Backend. message IngressServiceBackend { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. optional string name = 1; - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. optional ServiceBackendPort port = 2; } // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of an IngressClass cluster resource. Ingress + // ingressClassName is the name of an IngressClass cluster resource. Ingress // controller implementations use this field to know whether they should be // serving this Ingress resource, by a transitive connection // (controller -> IngressClass -> Ingress resource). Although the @@ -325,24 +325,24 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional optional IngressBackend defaultBackend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. 
Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional repeated IngressRule rules = 3; @@ -350,14 +350,14 @@ message IngressSpec { // IngressStatus describe the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional optional IngressLoadBalancerStatus loadBalancer = 1; } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. @@ -365,11 +365,11 @@ message IngressTLS { // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional optional string secretName = 2; } @@ -381,11 +381,11 @@ message NetworkPolicy { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional optional NetworkPolicySpec spec = 2; - // Status is the current state of the NetworkPolicy. + // status represents the current state of the NetworkPolicy. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional NetworkPolicyStatus status = 3; @@ -395,7 +395,7 @@ message NetworkPolicy { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 message NetworkPolicyEgressRule { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). 
// If this field is present and contains at least one item, then this rule allows @@ -403,7 +403,7 @@ message NetworkPolicyEgressRule { // +optional repeated NetworkPolicyPort ports = 1; - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -415,15 +415,15 @@ message NetworkPolicyEgressRule { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. message NetworkPolicyIngressRule { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional repeated NetworkPolicyPort ports = 1; - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -439,32 +439,32 @@ message NetworkPolicyList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated NetworkPolicy items = 2; } // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed message NetworkPolicyPeer { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. 
// - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional optional IPBlock ipBlock = 3; @@ -472,19 +472,19 @@ message NetworkPolicyPeer { // NetworkPolicyPort describes a port to allow traffic on message NetworkPolicyPort { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional optional string protocol = 1; - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. @@ -494,16 +494,16 @@ message NetworkPolicyPort { // NetworkPolicySpec provides the specification of a NetworkPolicy message NetworkPolicySpec { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. 
+ // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If @@ -512,8 +512,8 @@ message NetworkPolicySpec { // +optional repeated NetworkPolicyIngressRule ingress = 2; - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -522,23 +522,23 @@ message NetworkPolicySpec { // +optional repeated NetworkPolicyEgressRule egress = 3; - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional repeated string policyTypes = 4; } -// NetworkPolicyStatus describe the current state of the NetworkPolicy. +// NetworkPolicyStatus describes the current state of the NetworkPolicy. message NetworkPolicyStatus { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. + // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. // Current service state // +optional // +patchMergeKey=type @@ -550,12 +550,12 @@ message NetworkPolicyStatus { // ServiceBackendPort is the service port being referenced. message ServiceBackendPort { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional optional string name = 1; - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". 
// +optional optional int32 number = 2; diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index a9deb900a0..fa7cf1bd70 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -28,16 +28,17 @@ import ( // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the NetworkPolicy. + // status represents the current state of the NetworkPolicy. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -57,16 +58,16 @@ const ( // NetworkPolicySpec provides the specification of a NetworkPolicy type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If @@ -75,8 +76,8 @@ type NetworkPolicySpec struct { // +optional Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. 
If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves @@ -85,15 +86,15 @@ type NetworkPolicySpec struct { // +optional Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` @@ -102,15 +103,15 @@ type NetworkPolicySpec struct { // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule @@ -123,7 +124,7 @@ type NetworkPolicyIngressRule struct { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 type NetworkPolicyEgressRule struct { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). 
// If this field is present and contains at least one item, then this rule allows @@ -131,7 +132,7 @@ type NetworkPolicyEgressRule struct { // +optional Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule @@ -142,19 +143,19 @@ type NetworkPolicyEgressRule struct { // NetworkPolicyPort describes a port to allow traffic on type NetworkPolicyPort struct { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. @@ -166,12 +167,13 @@ type NetworkPolicyPort struct { // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. type IPBlock struct { - // CIDR is a string representing the IP Block + // cidr is a string representing the IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` - // Except is a slice of CIDRs that should not be included within an IP Block + + // except is a slice of CIDRs that should not be included within an IPBlock // Valid examples are "192.168.1.0/24" or "2001:db8::/64" - // Except values will be rejected if they are outside the CIDR range + // Except values will be rejected if they are outside the cidr range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } @@ -179,25 +181,25 @@ type IPBlock struct { // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed type NetworkPolicyPeer struct { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. 
- // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` @@ -233,9 +235,9 @@ const ( NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported" ) -// NetworkPolicyStatus describe the current state of the NetworkPolicy. +// NetworkPolicyStatus describes the current state of the NetworkPolicy. type NetworkPolicyStatus struct { - // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. + // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. // Current service state // +optional // +patchMergeKey=type @@ -250,12 +252,13 @@ type NetworkPolicyStatus struct { // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -268,17 +271,18 @@ type NetworkPolicyList struct { // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -289,18 +293,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of an IngressClass cluster resource. Ingress + // ingressClassName is the name of an IngressClass cluster resource. Ingress // controller implementations use this field to know whether they should be // serving this Ingress resource, by a transitive connection // (controller -> IngressClass -> Ingress resource). Although the @@ -313,72 +318,73 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional DefaultBackend *IngressBackend `json:"defaultBackend,omitempty" protobuf:"bytes,1,opt,name=defaultBackend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. 
The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +listType=atomic // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` } // IngressStatus describe the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // IngressLoadBalancerStatus represents the status of a load-balancer. type IngressLoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. type IngressLoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` @@ -386,14 +392,14 @@ type IngressLoadBalancerIngress struct { // IngressPortStatus represents the error condition of a service port type IngressPortStatus struct { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -412,7 +418,7 @@ type IngressPortStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. 
type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -425,14 +431,14 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` @@ -460,7 +466,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` } @@ -499,14 +505,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -522,19 +528,19 @@ type HTTPIngressPath struct { // Implementations are required to support all path types. PathType *PathType `json:"pathType" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Service references a Service as a Backend. 
+ // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional Service *IngressServiceBackend `json:"service,omitempty" protobuf:"bytes,4,opt,name=service"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -544,23 +550,23 @@ type IngressBackend struct { // IngressServiceBackend references a Kubernetes Service as a Backend. type IngressServiceBackend struct { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. Port ServiceBackendPort `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } // ServiceBackendPort is the service port being referenced. type ServiceBackendPort struct { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional Number int32 `json:"number,omitempty" protobuf:"bytes,2,opt,name=number"` @@ -577,12 +583,13 @@ type ServiceBackendPort struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -590,15 +597,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. 
This is optional if the controller does // not require extra parameters. // +optional @@ -617,20 +624,24 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -642,10 +653,11 @@ type IngressClassParametersReference struct { // IngressClassList is a collection of IngressClasses. type IngressClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 94ccf964b7..91161d5ca4 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. 
A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -49,8 +49,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_IPBlock = map[string]string{ "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the CIDR range", + "cidr": "cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", + "except": "except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the cidr range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -60,8 +60,8 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -70,8 +70,8 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "service": "Service references a Service as a Backend. This is a mutually exclusive setting with \"Resource\".", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", + "service": "service references a service as a backend. This is a mutually exclusive setting with \"Resource\".", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -81,7 +81,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -91,7 +91,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -100,11 +100,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -113,8 +113,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. 
For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -124,7 +124,7 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { @@ -133,9 +133,9 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressLoadBalancerIngress = map[string]string{ "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", - "ip": "IP is set for load-balancer ingress points that are IP based.", - "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", - "ports": "Ports provides information about the ports exposed by this LoadBalancer.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", } func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { @@ -144,7 +144,7 @@ func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { var map_IngressLoadBalancerStatus = map[string]string{ "": "IngressLoadBalancerStatus represents the status of a load-balancer.", - "ingress": "Ingress is a list containing ingress points for the load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", } func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { @@ -153,9 +153,9 @@ func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { var map_IngressPortStatus = map[string]string{ "": "IngressPortStatus represents the error condition of a service port", - "port": "Port is the port number of the ingress port.", - "protocol": "Protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", - "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. 
The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", } func (IngressPortStatus) SwaggerDoc() map[string]string { @@ -164,7 +164,7 @@ func (IngressPortStatus) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -181,8 +181,8 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressServiceBackend = map[string]string{ "": "IngressServiceBackend references a Kubernetes Service as a Backend.", - "name": "Name is the referenced service. The service must exist in the same namespace as the Ingress object.", - "port": "Port of the referenced service. A port name or port number is required for a IngressServiceBackend.", + "name": "name is the referenced service. The service must exist in the same namespace as the Ingress object.", + "port": "port of the referenced service. A port name or port number is required for a IngressServiceBackend.", } func (IngressServiceBackend) SwaggerDoc() map[string]string { @@ -191,10 +191,10 @@ func (IngressServiceBackend) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", - "defaultBackend": "DefaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", + "defaultBackend": "defaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. 
If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -203,7 +203,7 @@ func (IngressSpec) SwaggerDoc() map[string]string { var map_IngressStatus = map[string]string{ "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -211,9 +211,9 @@ func (IngressStatus) SwaggerDoc() map[string]string { } var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "": "IngressTLS describes the transport layer security associated with an ingress.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the \"Host\" header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { @@ -223,8 +223,8 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior for this NetworkPolicy.", - "status": "Status is the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents the specification of the desired behavior for this NetworkPolicy.", + "status": "status represents the current state of the NetworkPolicy. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (NetworkPolicy) SwaggerDoc() map[string]string { @@ -233,8 +233,8 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { var map_NetworkPolicyEgressRule = map[string]string{ "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", - "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", + "ports": "ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", } func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { @@ -243,8 +243,8 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "ports": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -254,7 +254,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (NetworkPolicyList) SwaggerDoc() map[string]string { @@ -263,9 +263,9 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { var map_NetworkPolicyPeer = map[string]string{ "": "NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed", - "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", - "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", - "ipBlock": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", + "podSelector": "podSelector is a label selector which selects pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the pods matching podSelector in the policy's own namespace.", + "namespaceSelector": "namespaceSelector selects namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf podSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the namespaces selected by namespaceSelector. Otherwise it selects all pods in the namespaces selected by namespaceSelector.", + "ipBlock": "ipBlock defines policy on a particular IPBlock. 
If this field is set then neither of the other fields can be.", } func (NetworkPolicyPeer) SwaggerDoc() map[string]string { @@ -274,9 +274,9 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { var map_NetworkPolicyPort = map[string]string{ "": "NetworkPolicyPort describes a port to allow traffic on", - "protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", - "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", + "protocol": "protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "port represents the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", + "endPort": "endPort indicates that the range of ports from port to endPort if set, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { @@ -285,10 +285,10 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { var map_NetworkPolicySpec = map[string]string{ "": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", - "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. 
If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "egress": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -296,8 +296,8 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { } var map_NetworkPolicyStatus = map[string]string{ - "": "NetworkPolicyStatus describe the current state of the NetworkPolicy.", - "conditions": "Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. 
Current service state", + "": "NetworkPolicyStatus describes the current state of the NetworkPolicy.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state", } func (NetworkPolicyStatus) SwaggerDoc() map[string]string { @@ -306,8 +306,8 @@ func (NetworkPolicyStatus) SwaggerDoc() map[string]string { var map_ServiceBackendPort = map[string]string{ "": "ServiceBackendPort is the service port being referenced.", - "name": "Name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", - "number": "Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", + "name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", + "number": "number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", } func (ServiceBackendPort) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go index 48d401db88..f54d1f8242 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -31,6 +31,8 @@ import ( math_bits "math/bits" reflect "reflect" strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -128,10 +130,126 @@ func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{3} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{4} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{5} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo + +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{6} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + func init() { proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") + proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference") } func init() { @@ -139,39 +257,51 @@ func init() { } var fileDescriptor_c1b7ac8d7d97acec = []byte{ - // 506 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x8f, 0xd2, 0x40, - 0x18, 0xc6, 0xe9, 0x2e, 0x24, 0x6b, 0xc1, 0xb0, 0xe9, 0x45, 0xc2, 0x61, 0x20, 0x9c, 0x48, 0x8c, - 0x33, 0xb2, 0x21, 0xc4, 0xab, 0xdd, 0x4d, 0x94, 0xc4, 0x3f, 0xd8, 0x4d, 0x3c, 0x18, 0x0f, 0x0e, - 0xe5, 0xb5, 0x8c, 0xd0, 0xce, 0x64, 0x66, 0xa8, 0xf1, 0xe6, 0x47, 0xf0, 0x2b, 0xe9, 0x89, 0xe3, - 0x1e, 0xf7, 0x44, 0xa4, 0x7e, 0x01, 0x3f, 0x82, 0x99, 0xa1, 0xbb, 0x94, 0x45, 0x57, 0xbd, 0x75, - 0xde, 0xf9, 0x3d, 0xcf, 0xfb, 0x3e, 0x7d, 0x5b, 0xf7, 0xc9, 0xec, 0x91, 0xc2, 0x8c, 0x93, 0xd9, - 0x62, 0x0c, 0x32, 0x01, 0x0d, 0x8a, 0xa4, 0x90, 0x4c, 0xb8, 0x24, 0xf9, 0x05, 0x15, 0x8c, 0x24, - 0xa0, 0x3f, 0x72, 0x39, 0x63, 0x49, 0x44, 0xd2, 0x1e, 0x9d, 0x8b, 0x29, 0xed, 0x91, 0x08, 0x12, - 0x90, 0x54, 0xc3, 0x04, 0x0b, 0xc9, 0x35, 0xf7, 0xd0, 0x86, 0xc7, 0x54, 0x30, 0xbc, 0xe5, 0xf1, - 0x15, 0xdf, 0x7c, 0x10, 0x31, 0x3d, 0x5d, 0x8c, 0x71, 0xc8, 0x63, 0x12, 0xf1, 0x88, 0x13, 0x2b, - 0x1b, 0x2f, 0xde, 0xdb, 0x93, 0x3d, 0xd8, 0xa7, 0x8d, 0x5d, 0xb3, 0x53, 0x68, 0x1f, 0x72, 0x09, - 0x24, 0xdd, 0x6b, 0xd9, 0xec, 0x6f, 0x99, 0x98, 0x86, 0x53, 0x96, 0x80, 0xfc, 0x44, 0xc4, 0x2c, - 0x32, 0x05, 0x45, 0x62, 0xd0, 0xf4, 0x77, 0x2a, 0xf2, 0x27, 0x95, 0x5c, 0x24, 0x9a, 0xc5, 0xb0, - 0x27, 0x18, 0xfc, 0x4d, 0xa0, 0xc2, 0x29, 0xc4, 0xf4, 0xa6, 0xae, 0xf3, 
0xcd, 0x71, 0xab, 0xa7, - 0xf3, 0x85, 0xd2, 0x20, 0x4f, 0x87, 0x67, 0x81, 0xf7, 0xce, 0x3d, 0x32, 0x33, 0x4d, 0xa8, 0xa6, - 0x0d, 0xa7, 0xed, 0x74, 0xab, 0x27, 0x0f, 0xf1, 0xf6, 0xa5, 0x5d, 0x5b, 0x63, 0x31, 0x8b, 0x4c, - 0x41, 0x61, 0x43, 0xe3, 0xb4, 0x87, 0x5f, 0x8e, 0x3f, 0x40, 0xa8, 0x9f, 0x83, 0xa6, 0xbe, 0xb7, - 0x5c, 0xb5, 0x4a, 0xd9, 0xaa, 0xe5, 0x6e, 0x6b, 0xc1, 0xb5, 0xab, 0xf7, 0xca, 0x2d, 0x2b, 0x01, - 0x61, 0xe3, 0xc0, 0xba, 0x13, 0x7c, 0xfb, 0x4a, 0x70, 0x61, 0xb8, 0x73, 0x01, 0xa1, 0x5f, 0xcb, - 0xcd, 0xcb, 0xe6, 0x14, 0x58, 0xab, 0xce, 0x57, 0xc7, 0xad, 0x17, 0xb8, 0x67, 0x4c, 0x69, 0xef, - 0xed, 0x5e, 0x10, 0xfc, 0x6f, 0x41, 0x8c, 0xda, 0xc6, 0x38, 0xce, 0x3b, 0x1d, 0x5d, 0x55, 0x0a, - 0x21, 0x46, 0x6e, 0x85, 0x69, 0x88, 0x55, 0xe3, 0xa0, 0x7d, 0xd8, 0xad, 0x9e, 0xdc, 0xff, 0x8f, - 0x14, 0xfe, 0xdd, 0xdc, 0xb7, 0x32, 0x34, 0x0e, 0xc1, 0xc6, 0xa8, 0xf3, 0x73, 0x37, 0x83, 0x49, - 0xe7, 0xbd, 0x76, 0x6b, 0x09, 0x9f, 0xc0, 0x39, 0xcc, 0x21, 0xd4, 0x5c, 0xe6, 0x39, 0xda, 0xc5, - 0x66, 0xe6, 0xb3, 0x33, 0x53, 0xbf, 0x28, 0x70, 0xfe, 0x71, 0xb6, 0x6a, 0xd5, 0x8a, 0x95, 0x60, - 0xc7, 0xc7, 0x7b, 0xec, 0xd6, 0x05, 0x48, 0x03, 0x3c, 0xe5, 0x4a, 0xfb, 0x4c, 0x2b, 0xbb, 0x8d, - 0x8a, 0x7f, 0x2f, 0x1f, 0xad, 0x3e, 0xda, 0xbd, 0x0e, 0x6e, 0xf2, 0x5e, 0xdb, 0x2d, 0x33, 0x91, - 0xf6, 0x1b, 0x87, 0x6d, 0xa7, 0x7b, 0x67, 0xbb, 0x94, 0xe1, 0x28, 0xed, 0x07, 0xf6, 0x26, 0x27, - 0x06, 0x8d, 0xf2, 0x1e, 0x31, 0xb0, 0xc4, 0xc0, 0x3f, 0x5b, 0xae, 0x51, 0xe9, 0x62, 0x8d, 0x4a, - 0x97, 0x6b, 0x54, 0xfa, 0x9c, 0x21, 0x67, 0x99, 0x21, 0xe7, 0x22, 0x43, 0xce, 0x65, 0x86, 0x9c, - 0xef, 0x19, 0x72, 0xbe, 0xfc, 0x40, 0xa5, 0x37, 0xe8, 0xf6, 0x7f, 0xfc, 0x57, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xdf, 0x1d, 0xe9, 0x86, 0x1d, 0x04, 0x00, 0x00, + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x4e, 0xdb, 0x4a, + 0x14, 0xc6, 0x63, 0x92, 0x48, 0x78, 0x00, 0x85, 0xeb, 0xcd, 0x8d, 0x58, 0x38, 0xb9, 0xb9, 0x1b, + 0xae, 0x6e, 0x19, 0x03, 0x42, 0x51, 0xb7, 0x98, 0x48, 0x34, 0x52, 0x0b, 0xe9, 0x20, 0xba, 0xa8, + 0x58, 0xd4, 0xb1, 0x0f, 0x8e, 0x1b, 0xfc, 0x47, 0x33, 0xe3, 0x54, 0xec, 0xfa, 0x08, 0x7d, 0xa1, + 0x56, 0x6a, 0x57, 0x2c, 0x59, 0xb2, 0x8a, 0x8a, 0xfb, 0x02, 0x5d, 0xb7, 0x9b, 0x6a, 0x26, 0x4e, + 0xec, 0x24, 0x0d, 0xd0, 0x0d, 0xbb, 0xcc, 0x39, 0xbf, 0xf3, 0xcd, 0x39, 0x73, 0xbe, 0x24, 0xe8, + 0xb0, 0xff, 0x94, 0x61, 0x2f, 0x34, 0xfa, 0x71, 0x17, 0x68, 0x00, 0x1c, 0x98, 0x31, 0x80, 0xc0, + 0x09, 0xa9, 0x91, 0x26, 0xac, 0xc8, 0x33, 0x02, 0xe0, 0xef, 0x42, 0xda, 0xf7, 0x02, 0xd7, 0x18, + 0xec, 0x58, 0x17, 0x51, 0xcf, 0xda, 0x31, 0x5c, 0x08, 0x80, 0x5a, 0x1c, 0x1c, 0x1c, 0xd1, 0x90, + 0x87, 0x9a, 0x3e, 0xe2, 0xb1, 0x15, 0x79, 0x38, 0xe3, 0xf1, 0x98, 0xdf, 0xd8, 0x72, 0x3d, 0xde, + 0x8b, 0xbb, 0xd8, 0x0e, 0x7d, 0xc3, 0x0d, 0xdd, 0xd0, 0x90, 0x65, 0xdd, 0xf8, 0x5c, 0x9e, 0xe4, + 0x41, 0x7e, 0x1a, 0xc9, 0x6d, 0x34, 0x72, 0xd7, 0xdb, 0x21, 0x05, 0x63, 0x30, 0x77, 0xe5, 0xc6, + 0x5e, 0xc6, 0xf8, 0x96, 0xdd, 0xf3, 0x02, 0xa0, 0x97, 0x46, 0xd4, 0x77, 0x45, 0x80, 0x19, 0x3e, + 0x70, 0xeb, 0x77, 0x55, 0xc6, 0xa2, 0x2a, 0x1a, 0x07, 0xdc, 0xf3, 0x61, 0xae, 0xa0, 0x79, 0x5f, + 0x01, 0xb3, 0x7b, 0xe0, 0x5b, 0xb3, 0x75, 0x8d, 0x2f, 0x0a, 0x5a, 0x39, 0xb8, 0x88, 0x19, 0x07, + 0x7a, 0xd0, 0x6e, 0x11, 0xed, 0x0d, 0x5a, 0x16, 0x3d, 0x39, 0x16, 0xb7, 0xaa, 0x4a, 0x5d, 0xd9, + 0x5c, 0xd9, 0xdd, 0xc6, 0xd9, 0xa3, 0x4d, 0xa4, 0x71, 0xd4, 0x77, 0x45, 0x80, 0x61, 0x41, 0xe3, + 0xc1, 0x0e, 0x3e, 0xee, 0xbe, 0x05, 0x9b, 0xbf, 0x00, 0x6e, 0x99, 0xda, 0xd5, 0xb0, 
0x56, 0x48, + 0x86, 0x35, 0x94, 0xc5, 0xc8, 0x44, 0x55, 0x7b, 0x89, 0x4a, 0x2c, 0x02, 0xbb, 0xba, 0x24, 0xd5, + 0x0d, 0x7c, 0xf7, 0x4a, 0x70, 0xae, 0xb9, 0x93, 0x08, 0x6c, 0x73, 0x35, 0x15, 0x2f, 0x89, 0x13, + 0x91, 0x52, 0x8d, 0xcf, 0x0a, 0xaa, 0xe4, 0xb8, 0xe7, 0x1e, 0xe3, 0xda, 0xd9, 0xdc, 0x20, 0xf8, + 0x61, 0x83, 0x88, 0x6a, 0x39, 0xc6, 0x7a, 0x7a, 0xd3, 0xf2, 0x38, 0x92, 0x1b, 0xa2, 0x83, 0xca, + 0x1e, 0x07, 0x9f, 0x55, 0x97, 0xea, 0xc5, 0xcd, 0x95, 0xdd, 0xff, 0xff, 0x60, 0x0a, 0x73, 0x2d, + 0xd5, 0x2d, 0xb7, 0x85, 0x02, 0x19, 0x09, 0x35, 0xbe, 0x4f, 0xcf, 0x20, 0xa6, 0xd3, 0x5e, 0xa1, + 0xd5, 0x20, 0x74, 0xe0, 0x04, 0x2e, 0xc0, 0xe6, 0x21, 0x4d, 0xe7, 0xa8, 0xe7, 0x2f, 0x13, 0xb6, + 0x13, 0x5d, 0x1f, 0xe5, 0x38, 0x73, 0x3d, 0x19, 0xd6, 0x56, 0xf3, 0x11, 0x32, 0xa5, 0xa3, 0xed, + 0xa3, 0x4a, 0x04, 0x54, 0x00, 0xcf, 0x42, 0xc6, 0x4d, 0x8f, 0x33, 0xb9, 0x8d, 0xb2, 0xf9, 0x77, + 0xda, 0x5a, 0xa5, 0x33, 0x9d, 0x26, 0xb3, 0xbc, 0x56, 0x47, 0x25, 0x2f, 0x1a, 0xec, 0x55, 0x8b, + 0x75, 0x65, 0x53, 0xcd, 0x96, 0xd2, 0xee, 0x0c, 0xf6, 0x88, 0xcc, 0xa4, 0x44, 0xb3, 0x5a, 0x9a, + 0x23, 0x9a, 0x92, 0x68, 0x36, 0x3e, 0x29, 0x48, 0x6d, 0x77, 0xf6, 0x1d, 0x87, 0x02, 0x63, 0x8f, + 0xe0, 0xbc, 0xe3, 0x29, 0xe7, 0x6d, 0xdd, 0xb7, 0xb3, 0x49, 0x6b, 0x0b, 0x7d, 0xf7, 0x51, 0x41, + 0x6b, 0x13, 0xea, 0x11, 0x5c, 0x77, 0x34, 0xed, 0xba, 0xff, 0x1e, 0x3c, 0xc1, 0x02, 0xcf, 0xf9, + 0xb9, 0xf6, 0xa5, 0xe1, 0xce, 0x90, 0x1a, 0x59, 0x14, 0x02, 0x4e, 0xe0, 0x3c, 0xed, 0xff, 0xde, + 0x2f, 0x68, 0x67, 0x5c, 0x00, 0x14, 0x02, 0x1b, 0xcc, 0xb5, 0x64, 0x58, 0x53, 0x27, 0x41, 0x92, + 0x09, 0x36, 0x7e, 0x2a, 0xa8, 0x32, 0x43, 0x6b, 0xff, 0xa2, 0xb2, 0x4b, 0xc3, 0x38, 0x92, 0xb7, + 0xa9, 0x59, 0x9f, 0x87, 0x22, 0x48, 0x46, 0x39, 0xed, 0x09, 0x5a, 0xa6, 0xc0, 0xc2, 0x98, 0xda, + 0x20, 0x97, 0xa7, 0x66, 0xaf, 0x44, 0xd2, 0x38, 0x99, 0x10, 0x9a, 0x81, 0xd4, 0xc0, 0xf2, 0x81, + 0x45, 0x96, 0x0d, 0xa9, 0x3f, 0xff, 0x4a, 0x71, 0xf5, 0x68, 0x9c, 0x20, 0x19, 0x23, 0x9c, 0x2a, + 0x0e, 0xb3, 0x4e, 0x15, 0x2c, 0x91, 0x19, 0xcd, 0x44, 0xc5, 0xd8, 0x73, 0xaa, 0x65, 0x09, 0x6c, + 0xa7, 0x40, 0xf1, 0xb4, 0xdd, 0xfa, 0x31, 0xac, 0xfd, 0xb3, 0xe8, 0x97, 0x97, 0x5f, 0x46, 0xc0, + 0xf0, 0x69, 0xbb, 0x45, 0x44, 0xb1, 0xd9, 0xba, 0xba, 0xd5, 0x0b, 0xd7, 0xb7, 0x7a, 0xe1, 0xe6, + 0x56, 0x2f, 0xbc, 0x4f, 0x74, 0xe5, 0x2a, 0xd1, 0x95, 0xeb, 0x44, 0x57, 0x6e, 0x12, 0x5d, 0xf9, + 0x9a, 0xe8, 0xca, 0x87, 0x6f, 0x7a, 0xe1, 0xb5, 0x7e, 0xf7, 0x3f, 0xda, 0xaf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xf9, 0x9d, 0x9e, 0xc6, 0x0b, 0x07, 0x00, 0x00, } func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { @@ -312,6 +442,179 @@ func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *IPAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil +} + +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParentRef != nil { + { + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -350,82 +653,597 @@ func (m *ClusterCIDRList) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - return n -} - -func (m *ClusterCIDRSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NodeSelector != nil { - l = m.NodeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) - l = len(m.IPv4) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IPv6) - n += 1 + l + sovGenerated(uint64(l)) - return n -} + return n +} + +func (m *ClusterCIDRSpec) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) + l = len(m.IPv4) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IPv6) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDRSpec{`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, + `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, + `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + } + m.PerNodeHostBits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PerNodeHostBits |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv4 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv6 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ClusterCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDR{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ClusterCIDR{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + 
"," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ClusterCIDRList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDRSpec{`, - `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, - `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, - `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, - `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { +func (m *IPAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -448,10 +1266,10 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -541,7 +1359,7 @@ func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { +func (m *IPAddressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -564,10 +1382,10 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -632,7 +1450,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ClusterCIDR{}) + m.Items = append(m.Items, IPAddress{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -658,7 +1476,7 @@ func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -681,15 +1499,15 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { 
case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -716,18 +1534,100 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NodeSelector == nil { - m.NodeSelector = &v11.NodeSelector{} + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} } - if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParentReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - m.PerNodeHostBits = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -737,14 +1637,27 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PerNodeHostBits |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -772,11 +1685,11 @@ func (m 
*ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv4 = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -804,7 +1717,39 @@ func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IPv6 = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto index bbda585b85..0f1f30d701 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -44,7 +44,7 @@ message ClusterCIDR { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the ClusterCIDR. + // spec is the desired state of the ClusterCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ClusterCIDRSpec spec = 2; @@ -57,19 +57,19 @@ message ClusterCIDRList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of ClusterCIDRs. + // items is the list of ClusterCIDRs. repeated ClusterCIDR items = 2; } // ClusterCIDRSpec defines the desired state of ClusterCIDR. message ClusterCIDRSpec { - // NodeSelector defines which nodes the config is applicable to. - // An empty or nil NodeSelector selects all nodes. + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. // This field is immutable. // +optional optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; - // PerNodeHostBits defines the number of host bits to be configured per node. + // perNodeHostBits defines the number of host bits to be configured per node. // A subnet mask determines how much of the address is used for network bits // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the // address into 24 bits for the network portion and 8 bits for the host portion. @@ -79,16 +79,77 @@ message ClusterCIDRSpec { // +required optional int32 perNodeHostBits = 2; - // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of IPv4 and IPv6 must be specified. + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. 
// +optional optional string ipv4 = 3; - // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of IPv4 and IPv6 must be specified. + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional optional string ipv6 = 4; } +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +message IPAddress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IPAddressSpec spec = 2; +} + +// IPAddressList contains a list of IPAddress. +message IPAddressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of IPAddresses. + repeated IPAddress items = 2; +} + +// IPAddressSpec describe the attributes in an IP Address. +message IPAddressSpec { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + optional ParentReference parentRef = 1; +} + +// ParentReference describes a reference to a parent object. +message ParentReference { + // Group is the group of the object being referenced. + // +optional + optional string group = 1; + + // Resource is the resource of the object being referenced. + // +required + optional string resource = 2; + + // Namespace is the namespace of the object being referenced. + // +optional + optional string namespace = 3; + + // Name is the name of the object being referenced. + // +required + optional string name = 4; + + // UID is the uid of the object being referenced. + // +optional + optional string uid = 5; +} + diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go index 12c0cf7bd4..8dda6394d4 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/register.go +++ b/vendor/k8s.io/api/networking/v1alpha1/register.go @@ -22,12 +22,17 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package. +// GroupName is the group name used in this package. const GroupName = "networking.k8s.io" -// SchemeGroupVersion is group version used to register these objects. +// SchemeGroupVersion is group version used to register objects in this package. var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +// Kind takes an unqualified kind and returns a Group qualified GroupKind. 
+func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + // Resource takes an unqualified resource and returns a Group qualified GroupResource. func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() @@ -49,8 +54,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &ClusterCIDR{}, &ClusterCIDRList{}, + &IPAddress{}, + &IPAddressList{}, ) - // Add the watch version that applies. metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go index 734e9bf8a8..52e4a11e8b 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -19,6 +19,7 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) // +genclient @@ -37,12 +38,13 @@ import ( // selector matches the Node may be used. type ClusterCIDR struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the ClusterCIDR. + // spec is the desired state of the ClusterCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -50,13 +52,13 @@ type ClusterCIDR struct { // ClusterCIDRSpec defines the desired state of ClusterCIDR. type ClusterCIDRSpec struct { - // NodeSelector defines which nodes the config is applicable to. - // An empty or nil NodeSelector selects all nodes. + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. // This field is immutable. // +optional NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - // PerNodeHostBits defines the number of host bits to be configured per node. + // perNodeHostBits defines the number of host bits to be configured per node. // A subnet mask determines how much of the address is used for network bits // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the // address into 24 bits for the network portion and 8 bits for the host portion. @@ -66,14 +68,14 @@ type ClusterCIDRSpec struct { // +required PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` - // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of IPv4 and IPv6 must be specified. + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. // +optional IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` - // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of IPv4 and IPv6 must be specified. + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. // This field is immutable. 
// +optional IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` @@ -85,11 +87,77 @@ type ClusterCIDRSpec struct { // ClusterCIDRList contains a list of ClusterCIDR. type ClusterCIDRList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of ClusterCIDRs. + // items is the list of ClusterCIDRs. Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +type IPAddress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAddressSpec describe the attributes in an IP Address. +type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. +type ParentReference struct { + // Group is the group of the object being referenced. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. + // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` + // UID is the uid of the object being referenced. + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. + Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index e0d4a47861..85304784f4 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ClusterCIDR = map[string]string{ "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ClusterCIDR) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (ClusterCIDR) SwaggerDoc() map[string]string { var map_ClusterCIDRList = map[string]string{ "": "ClusterCIDRList contains a list of ClusterCIDR.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of ClusterCIDRs.", + "items": "items is the list of ClusterCIDRs.", } func (ClusterCIDRList) SwaggerDoc() map[string]string { @@ -49,14 +49,56 @@ func (ClusterCIDRList) SwaggerDoc() map[string]string { var map_ClusterCIDRSpec = map[string]string{ "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "nodeSelector": "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable.", - "perNodeHostBits": "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). 
This field is immutable.", - "ipv4": "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", - "ipv6": "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", + "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", + "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", + "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", } func (ClusterCIDRSpec) SwaggerDoc() map[string]string { return map_ClusterCIDRSpec } +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. 
An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", + "uid": "UID is the uid of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go new file mode 100644 index 0000000000..5f9c23f708 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. + // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go index e549f31663..97db2eacc9 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -106,3 +106,100 @@ func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go index dd6e3b26cb..60438ba59f 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -56,3 +56,39 @@ func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { return 1, 31 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index 78ecf9fae2..46bb7f66f2 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -33,14 +33,14 @@ option go_package = "k8s.io/api/networking/v1beta1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -57,7 +57,7 @@ message HTTPIngressPath { // Defaults to ImplementationSpecific. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -68,7 +68,7 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. 
repeated HTTPIngressPath paths = 1; } @@ -82,12 +82,12 @@ message Ingress { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -95,15 +95,15 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional optional string serviceName = 1; - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -121,7 +121,7 @@ message IngressClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -133,30 +133,30 @@ message IngressClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -165,15 +165,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. 
message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -187,21 +187,21 @@ message IngressList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. message IngressLoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional optional string ip = 1; - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional optional string hostname = 2; - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional repeated IngressPortStatus ports = 4; @@ -209,21 +209,21 @@ message IngressLoadBalancerIngress { // LoadBalancerStatus represents the status of a load-balancer. message IngressLoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional repeated IngressLoadBalancerIngress ingress = 1; } // IngressPortStatus represents the error condition of a service port message IngressPortStatus { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. optional int32 port = 1; - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. // The supported values are: "TCP", "UDP", "SCTP" optional string protocol = 2; - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -242,7 +242,7 @@ message IngressPortStatus { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -255,7 +255,7 @@ message IngressRule { // IngressRuleValue. 
If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -287,7 +287,7 @@ message IngressRuleValue { // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -300,44 +300,44 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional optional IngressBackend backend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional repeated IngressRule rules = 3; } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional optional IngressLoadBalancerStatus loadBalancer = 1; } // IngressTLS describes the transport layer security associated with an Ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. 
If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 49c82123d0..87cc91654b 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -34,17 +34,18 @@ import ( // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -58,18 +59,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -82,22 +84,22 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. 
// +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` @@ -106,13 +108,14 @@ type IngressSpec struct { // IngressTLS describes the transport layer security associated with an Ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination @@ -122,31 +125,31 @@ type IngressTLS struct { // TODO: Consider specifying different modes of termination, protocols etc. } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // LoadBalancerStatus represents the status of a load-balancer. type IngressLoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. + // ingress is a list containing ingress points for the load-balancer. // +optional Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // IngressLoadBalancerIngress represents the status of a load-balancer ingress point. type IngressLoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based. + // ip is set for load-balancer ingress points that are IP based. // +optional IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - // Hostname is set for load-balancer ingress points that are DNS based. + // hostname is set for load-balancer ingress points that are DNS based. // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` - // Ports provides information about the ports exposed by this LoadBalancer. + // ports provides information about the ports exposed by this LoadBalancer. // +listType=atomic // +optional Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` @@ -154,14 +157,14 @@ type IngressLoadBalancerIngress struct { // IngressPortStatus represents the error condition of a service port type IngressPortStatus struct { - // Port is the port number of the ingress port. + // port is the port number of the ingress port. Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` - // Protocol is the protocol of the ingress port. + // protocol is the protocol of the ingress port. 
// The supported values are: "TCP", "UDP", "SCTP" Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // Error is to record the problem with the service port + // error is to record the problem with the service port // The format of the error shall comply with the following rules: // - built-in error values shall be specified in this file and those shall use // CamelCase names @@ -180,7 +183,7 @@ type IngressPortStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -193,7 +196,7 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -204,6 +207,7 @@ type IngressRule struct { // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. // If unspecified, the rule defaults to a http catch-all. Whether that sends // just traffic matching the host to the default backend or all traffic to the @@ -234,7 +238,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. @@ -273,14 +277,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -297,22 +301,22 @@ type HTTPIngressPath struct { // Defaults to ImplementationSpecific. 
PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -333,12 +337,13 @@ type IngressBackend struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -346,15 +351,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -373,19 +378,23 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. 
// +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -404,6 +413,6 @@ type IngressClassList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 195d535c57..b2373669fe 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. 
Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -50,8 +50,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -60,9 +60,9 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", + "serviceName": "serviceName specifies the name of the referenced service.", + "servicePort": "servicePort Specifies the port of the referenced service.", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -72,7 +72,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -82,7 +82,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -91,11 +91,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. 
If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -104,8 +104,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -115,7 +115,7 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { @@ -124,9 +124,9 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressLoadBalancerIngress = map[string]string{ "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", - "ip": "IP is set for load-balancer ingress points that are IP based.", - "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", - "ports": "Ports provides information about the ports exposed by this LoadBalancer.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", } func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { @@ -135,7 +135,7 @@ func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { var map_IngressLoadBalancerStatus = map[string]string{ "": "LoadBalancerStatus represents the status of a load-balancer.", - "ingress": "Ingress is a list containing ingress points for the load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", } func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { @@ -144,9 +144,9 @@ func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { var map_IngressPortStatus = map[string]string{ "": "IngressPortStatus represents the error condition of a service port", - "port": "Port is the port number of the ingress port.", - "protocol": "Protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", - "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", } func (IngressPortStatus) SwaggerDoc() map[string]string { @@ -155,7 +155,7 @@ func (IngressPortStatus) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. 
The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -172,10 +172,10 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. 
This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", + "backend": "backend is the default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -183,8 +183,8 @@ func (IngressSpec) SwaggerDoc() map[string]string { } var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "": "IngressStatus describes the current state of the Ingress.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -193,8 +193,8 @@ func (IngressStatus) SwaggerDoc() map[string]string { var map_IngressTLS = map[string]string{ "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. 
Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto index 294be85b62..0152d5e3ab 100644 --- a/vendor/k8s.io/api/node/v1/generated.proto +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -61,13 +61,13 @@ message RuntimeClass { // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go index 984696d983..b00f58772c 100644 --- a/vendor/k8s.io/api/node/v1/types.go +++ b/vendor/k8s.io/api/node/v1/types.go @@ -34,11 +34,12 @@ import ( // https://kubernetes.io/docs/concepts/containers/runtime-class/ type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. 
It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -50,13 +51,13 @@ type RuntimeClass struct { // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -66,7 +67,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -96,11 +97,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go index a9eddc60ea..f5e6b32779 100644 --- a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index d46e0ec6aa..4673e9261d 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1alpha1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. 
// +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status optional RuntimeClassSpec spec = 2; } @@ -61,7 +61,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } @@ -70,7 +70,7 @@ message RuntimeClassList { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. message RuntimeClassSpec { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -78,17 +78,17 @@ message RuntimeClassSpec { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. optional string runtimeHandler = 1; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 2; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index 588c8e4c0a..bf9e284bf7 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -34,11 +34,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` } @@ -48,7 +49,7 @@ type RuntimeClass struct { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. 
type RuntimeClassSpec struct { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -56,17 +57,17 @@ type RuntimeClassSpec struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -76,7 +77,7 @@ type RuntimeClassSpec struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -106,11 +107,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index 96413754f0..ccc1b70853 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -49,7 +49,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { @@ -58,9 +58,9 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. 
If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "runtimeHandler": "runtimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClassSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 8ffad69731..54dbc0995a 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/node/v1beta1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional map podFixed = 1; } @@ -49,7 +49,7 @@ message RuntimeClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -57,17 +57,17 @@ message RuntimeClass { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,7 +82,7 @@ message RuntimeClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. 
+ // items is a list of schema objects. repeated RuntimeClass items = 2; } diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index b924cb421a..74ecca26ad 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -36,11 +36,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -48,17 +49,17 @@ type RuntimeClass struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -68,7 +69,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -100,11 +101,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. 
Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index fec4398b2e..086105ecc5 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto index 0a1e010b91..a79e710284 100644 --- a/vendor/k8s.io/api/policy/v1/generated.proto +++ b/vendor/k8s.io/api/policy/v1/generated.proto @@ -116,8 +116,8 @@ message PodDisruptionBudgetSpec { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional optional string unhealthyPodEvictionPolicy = 4; } diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go index 6aec30b89b..45b9550f4a 100644 --- a/vendor/k8s.io/api/policy/v1/types.go +++ b/vendor/k8s.io/api/policy/v1/types.go @@ -71,8 +71,8 @@ type PodDisruptionBudgetSpec struct { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } diff --git a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go index 582b28c15b..799b0794a9 100644 --- a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Eviction = map[string]string{ @@ -63,7 +63,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{ "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. 
So for example you can prevent all voluntary evictions by specifying \"100%\".", "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", - "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is alpha-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. 
The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 989b48458c..16301c236a 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -177,8 +177,8 @@ message PodDisruptionBudgetSpec { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional optional string unhealthyPodEvictionPolicy = 4; } diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 863b2b8732..1e6b075e32 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -69,8 +69,8 @@ type PodDisruptionBudgetSpec struct { // Clients making eviction decisions should disallow eviction of unhealthy pods // if they encounter an unrecognized policy in this field. // - // This field is alpha-level. The eviction API uses this field when - // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default). + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). // +optional UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index cebba07f47..266a9a853a 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AllowedCSIDriver = map[string]string{ @@ -121,7 +121,7 @@ var map_PodDisruptionBudgetSpec = map[string]string{ "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.", "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. 
This is a mutually exclusive setting with \"minAvailable\".", - "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is alpha-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 63aa4ed7b6..370398198b 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 08578aba92..6708f3e58e 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index db9525832b..fff1fe40fa 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ diff --git a/vendor/k8s.io/api/resource/v1alpha1/doc.go b/vendor/k8s.io/api/resource/v1alpha2/doc.go similarity index 84% rename from vendor/k8s.io/api/resource/v1alpha1/doc.go rename to vendor/k8s.io/api/resource/v1alpha2/doc.go index 8fa577fabc..d9c20e089d 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/doc.go +++ b/vendor/k8s.io/api/resource/v1alpha2/doc.go @@ -20,5 +20,5 @@ limitations under the License. // +groupName=resource.k8s.io -// Package v1alpha1 is the v1alpha1 version of the resource API. -package v1alpha1 // import "k8s.io/api/resource/v1alpha1" +// Package v1alpha2 is the v1alpha2 version of the resource API. +package v1alpha2 // import "k8s.io/api/resource/v1alpha2" diff --git a/vendor/k8s.io/api/resource/v1alpha1/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go similarity index 83% rename from vendor/k8s.io/api/resource/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/resource/v1alpha2/generated.pb.go index 632ad04259..2e8f9c724a 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto -package v1alpha1 +package v1alpha2 import ( fmt "fmt" @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AllocationResult) Reset() { *m = AllocationResult{} } func (*AllocationResult) ProtoMessage() {} func (*AllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{0} + return fileDescriptor_3add37bbd52889e0, []int{0} } func (m *AllocationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,15 +74,15 @@ func (m *AllocationResult) XXX_DiscardUnknown() { var xxx_messageInfo_AllocationResult proto.InternalMessageInfo -func (m *PodScheduling) Reset() { *m = PodScheduling{} } -func (*PodScheduling) ProtoMessage() {} -func (*PodScheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{1} +func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } +func (*PodSchedulingContext) ProtoMessage() {} +func (*PodSchedulingContext) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{1} } -func (m *PodScheduling) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -90,27 +90,27 @@ func (m *PodScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *PodScheduling) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodScheduling.Merge(m, src) +func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContext.Merge(m, src) } -func (m *PodScheduling) XXX_Size() int { +func (m *PodSchedulingContext) XXX_Size() int { return m.Size() } -func (m *PodScheduling) XXX_DiscardUnknown() { - xxx_messageInfo_PodScheduling.DiscardUnknown(m) +func (m *PodSchedulingContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) } -var xxx_messageInfo_PodScheduling proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo -func (m *PodSchedulingList) Reset() { *m = PodSchedulingList{} } -func (*PodSchedulingList) ProtoMessage() {} -func (*PodSchedulingList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{2} +func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } +func (*PodSchedulingContextList) ProtoMessage() {} +func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{2} } -func (m *PodSchedulingList) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -118,27 +118,27 @@ func (m *PodSchedulingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, e } return b[:n], nil } -func (m *PodSchedulingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingList.Merge(m, src) +func (m 
*PodSchedulingContextList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextList.Merge(m, src) } -func (m *PodSchedulingList) XXX_Size() int { +func (m *PodSchedulingContextList) XXX_Size() int { return m.Size() } -func (m *PodSchedulingList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingList.DiscardUnknown(m) +func (m *PodSchedulingContextList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingList proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo -func (m *PodSchedulingSpec) Reset() { *m = PodSchedulingSpec{} } -func (*PodSchedulingSpec) ProtoMessage() {} -func (*PodSchedulingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{3} +func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } +func (*PodSchedulingContextSpec) ProtoMessage() {} +func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{3} } -func (m *PodSchedulingSpec) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -146,27 +146,27 @@ func (m *PodSchedulingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, e } return b[:n], nil } -func (m *PodSchedulingSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingSpec.Merge(m, src) +func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) } -func (m *PodSchedulingSpec) XXX_Size() int { +func (m *PodSchedulingContextSpec) XXX_Size() int { return m.Size() } -func (m *PodSchedulingSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingSpec.DiscardUnknown(m) +func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingSpec proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo -func (m *PodSchedulingStatus) Reset() { *m = PodSchedulingStatus{} } -func (*PodSchedulingStatus) ProtoMessage() {} -func (*PodSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{4} +func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } +func (*PodSchedulingContextStatus) ProtoMessage() {} +func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{4} } -func (m *PodSchedulingStatus) XXX_Unmarshal(b []byte) error { +func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *PodSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -174,22 +174,22 @@ func (m *PodSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, } return b[:n], nil } -func (m *PodSchedulingStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingStatus.Merge(m, src) +func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) } -func (m *PodSchedulingStatus) XXX_Size() int { +func (m *PodSchedulingContextStatus) XXX_Size() int { return m.Size() } -func (m *PodSchedulingStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingStatus.DiscardUnknown(m) +func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) } -var xxx_messageInfo_PodSchedulingStatus proto.InternalMessageInfo +var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{5} + return fileDescriptor_3add37bbd52889e0, []int{5} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +217,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } func (*ResourceClaimConsumerReference) ProtoMessage() {} func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{6} + return fileDescriptor_3add37bbd52889e0, []int{6} } func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +245,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } func (*ResourceClaimList) ProtoMessage() {} func (*ResourceClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{7} + return fileDescriptor_3add37bbd52889e0, []int{7} } func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +273,7 @@ var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo func (m *ResourceClaimParametersReference) Reset() { *m = ResourceClaimParametersReference{} } func (*ResourceClaimParametersReference) ProtoMessage() {} func (*ResourceClaimParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{8} + return fileDescriptor_3add37bbd52889e0, []int{8} } func (m *ResourceClaimParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +301,7 @@ var xxx_messageInfo_ResourceClaimParametersReference proto.InternalMessageInfo func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } func (*ResourceClaimSchedulingStatus) ProtoMessage() {} func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{9} + return fileDescriptor_3add37bbd52889e0, []int{9} } func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +329,7 @@ var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } func (*ResourceClaimSpec) ProtoMessage() {} func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{10} + return fileDescriptor_3add37bbd52889e0, []int{10} } func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +357,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } func (*ResourceClaimStatus) ProtoMessage() {} func (*ResourceClaimStatus) Descriptor() 
([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{11} + return fileDescriptor_3add37bbd52889e0, []int{11} } func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +385,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } func (*ResourceClaimTemplate) ProtoMessage() {} func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{12} + return fileDescriptor_3add37bbd52889e0, []int{12} } func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +413,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } func (*ResourceClaimTemplateList) ProtoMessage() {} func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{13} + return fileDescriptor_3add37bbd52889e0, []int{13} } func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +441,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } func (*ResourceClaimTemplateSpec) ProtoMessage() {} func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{14} + return fileDescriptor_3add37bbd52889e0, []int{14} } func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +469,7 @@ var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo func (m *ResourceClass) Reset() { *m = ResourceClass{} } func (*ResourceClass) ProtoMessage() {} func (*ResourceClass) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{15} + return fileDescriptor_3add37bbd52889e0, []int{15} } func (m *ResourceClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,7 +497,7 @@ var xxx_messageInfo_ResourceClass proto.InternalMessageInfo func (m *ResourceClassList) Reset() { *m = ResourceClassList{} } func (*ResourceClassList) ProtoMessage() {} func (*ResourceClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{16} + return fileDescriptor_3add37bbd52889e0, []int{16} } func (m *ResourceClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -525,7 +525,7 @@ var xxx_messageInfo_ResourceClassList proto.InternalMessageInfo func (m *ResourceClassParametersReference) Reset() { *m = ResourceClassParametersReference{} } func (*ResourceClassParametersReference) ProtoMessage() {} func (*ResourceClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a66b2ee03d862be2, []int{17} + return fileDescriptor_3add37bbd52889e0, []int{17} } func (m *ResourceClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,107 +550,140 @@ func (m *ResourceClassParametersReference) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo +func (m *ResourceHandle) Reset() { *m = ResourceHandle{} } +func (*ResourceHandle) ProtoMessage() {} +func (*ResourceHandle) Descriptor() ([]byte, []int) { + return fileDescriptor_3add37bbd52889e0, []int{18} +} +func (m *ResourceHandle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceHandle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHandle.Merge(m, src) +} +func (m *ResourceHandle) XXX_Size() int { + return m.Size() +} +func (m *ResourceHandle) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHandle.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHandle proto.InternalMessageInfo + func init() { - proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha1.AllocationResult") - proto.RegisterType((*PodScheduling)(nil), "k8s.io.api.resource.v1alpha1.PodScheduling") - proto.RegisterType((*PodSchedulingList)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingList") - proto.RegisterType((*PodSchedulingSpec)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingSpec") - proto.RegisterType((*PodSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha1.PodSchedulingStatus") - proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaim") - proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimConsumerReference") - proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimList") - proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimParametersReference") - proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimSchedulingStatus") - proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimSpec") - proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimStatus") - proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplate") - proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplateList") - proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha1.ResourceClaimTemplateSpec") - proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha1.ResourceClass") - proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha1.ResourceClassList") - proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha1.ResourceClassParametersReference") + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult") + proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContext") + proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextList") + proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextSpec") + proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList") + proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersReference") + proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSchedulingStatus") + proto.RegisterType((*ResourceClaimSpec)(nil), 
"k8s.io.api.resource.v1alpha2.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha2.ResourceClass") + proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassList") + proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersReference") + proto.RegisterType((*ResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.ResourceHandle") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha1/generated.proto", fileDescriptor_a66b2ee03d862be2) -} - -var fileDescriptor_a66b2ee03d862be2 = []byte{ - // 1174 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x6e, 0x95, 0x8c, 0x1b, 0x37, 0xd9, 0x34, 0xc8, 0x8d, 0x5a, 0xdb, 0xec, 0xc9, - 0x12, 0xb0, 0xdb, 0x04, 0x04, 0x15, 0x1f, 0x95, 0xb2, 0x0d, 0x94, 0x08, 0x9a, 0x9a, 0x31, 0x91, - 0x08, 0x42, 0x88, 0xf1, 0xee, 0xab, 0xbd, 0x64, 0xbf, 0xd8, 0xd9, 0x35, 0xaa, 0xb8, 0xf4, 0xca, - 0x0d, 0x21, 0xee, 0x1c, 0xf9, 0x43, 0x10, 0x52, 0x8e, 0x91, 0xe0, 0xd0, 0x93, 0x45, 0xcc, 0x81, - 0x3f, 0x80, 0x13, 0x3d, 0xa1, 0x19, 0xef, 0xae, 0x77, 0xd6, 0x1f, 0xc4, 0x11, 0x8a, 0xc2, 0x29, - 0x99, 0x79, 0xbf, 0xf7, 0x9b, 0xf7, 0x31, 0xef, 0xcd, 0x5b, 0xa3, 0x77, 0x8f, 0xee, 0x52, 0xd5, - 0xf2, 0xb4, 0xa3, 0xa8, 0x0d, 0x81, 0x0b, 0x21, 0x50, 0xad, 0x07, 0xae, 0xe9, 0x05, 0x5a, 0x2c, - 0x20, 0xbe, 0xa5, 0x05, 0x40, 0xbd, 0x28, 0x30, 0x40, 0xeb, 0x6d, 0x11, 0xdb, 0xef, 0x92, 0x2d, - 0xad, 0x03, 0x2e, 0x04, 0x24, 0x04, 0x53, 0xf5, 0x03, 0x2f, 0xf4, 0xe4, 0x5b, 0x43, 0xb4, 0x4a, - 0x7c, 0x4b, 0x4d, 0xd0, 0x6a, 0x82, 0xde, 0x7c, 0xa5, 0x63, 0x85, 0xdd, 0xa8, 0xad, 0x1a, 0x9e, - 0xa3, 0x75, 0xbc, 0x8e, 0xa7, 0x71, 0xa5, 0x76, 0xf4, 0x98, 0xaf, 0xf8, 0x82, 0xff, 0x37, 0x24, - 0xdb, 0x54, 0x32, 0x47, 0x1b, 0x5e, 0xc0, 0x8e, 0xcd, 0x1f, 0xb8, 0xf9, 0xda, 0x08, 0xe3, 0x10, - 0xa3, 0x6b, 0xb9, 0x10, 0x3c, 0xd1, 0xfc, 0xa3, 0x0e, 0xdb, 0xa0, 0x9a, 0x03, 0x21, 0x99, 0xa4, - 0xa5, 0x4d, 0xd3, 0x0a, 0x22, 0x37, 0xb4, 0x1c, 0x18, 0x53, 0x78, 0xfd, 0xdf, 0x14, 0xa8, 0xd1, - 0x05, 0x87, 0xe4, 0xf5, 0x94, 0x3f, 0x25, 0xb4, 0xba, 0x63, 0xdb, 0x9e, 0x41, 0x42, 0xcb, 0x73, - 0x31, 0xd0, 0xc8, 0x0e, 0xe5, 0x7b, 0xa8, 0x9c, 0xc4, 0xe6, 0x7d, 0xe2, 0x9a, 0x36, 0x54, 0xa4, - 0xba, 0xd4, 0x58, 0xd6, 0x5f, 0x38, 0xee, 0xd7, 0x16, 0x06, 0xfd, 0x5a, 0x19, 0x0b, 0x52, 0x9c, - 0x43, 0xcb, 0x6d, 0xb4, 0x4a, 0x7a, 0xc4, 0xb2, 0x49, 0xdb, 0x86, 0x47, 0xee, 0xbe, 0x67, 0x02, - 0xad, 0x2c, 0xd6, 0xa5, 0x46, 0x69, 0xbb, 0xae, 0x66, 0xe2, 0xcf, 0x42, 0xa6, 0xf6, 0xb6, 0x54, - 0x06, 0x68, 0x81, 0x0d, 0x46, 0xe8, 0x05, 0xfa, 0x8d, 0x41, 0xbf, 0xb6, 0xba, 0x93, 0xd3, 0xc6, - 0x63, 0x7c, 0xb2, 0x86, 0x96, 0x69, 0x97, 0x04, 0xc0, 0xf6, 0x2a, 0x85, 0xba, 0xd4, 0x58, 0xd2, - 0xd7, 0x62, 0xf3, 0x96, 0x5b, 0x89, 0x00, 0x8f, 0x30, 0xca, 0x8f, 0x8b, 0x68, 0xa5, 0xe9, 0x99, - 0x2d, 0xa3, 0x0b, 0x66, 0x64, 0x5b, 0x6e, 0x47, 0xfe, 0x02, 0x2d, 0xb1, 0xf8, 0x9b, 0x24, 0x24, - 0xdc, 0xc1, 0xd2, 0xf6, 0x9d, 0x8c, 0x79, 0x69, 0x18, 0x55, 
0xff, 0xa8, 0xc3, 0x36, 0xa8, 0xca, - 0xd0, 0xcc, 0xe0, 0x47, 0xed, 0x2f, 0xc1, 0x08, 0x1f, 0x42, 0x48, 0x74, 0x39, 0x3e, 0x13, 0x8d, - 0xf6, 0x70, 0xca, 0x2a, 0x7f, 0x84, 0x8a, 0xd4, 0x07, 0x23, 0x76, 0x5e, 0x53, 0x67, 0x5d, 0x3e, - 0x55, 0x30, 0xae, 0xe5, 0x83, 0xa1, 0x5f, 0x8b, 0xc9, 0x8b, 0x6c, 0x85, 0x39, 0x95, 0x7c, 0x88, - 0xae, 0xd2, 0x90, 0x84, 0x11, 0xe5, 0x4e, 0x97, 0xb6, 0xb7, 0xe6, 0x21, 0xe5, 0x8a, 0x7a, 0x39, - 0xa6, 0xbd, 0x3a, 0x5c, 0xe3, 0x98, 0x50, 0xf9, 0x59, 0x42, 0x6b, 0x02, 0xfe, 0x43, 0x8b, 0x86, - 0xf2, 0x67, 0x63, 0x51, 0x52, 0xcf, 0x16, 0x25, 0xa6, 0xcd, 0x63, 0xb4, 0x1a, 0x9f, 0xb7, 0x94, - 0xec, 0x64, 0x22, 0xd4, 0x44, 0x57, 0xac, 0x10, 0x1c, 0x76, 0x3f, 0x0a, 0x8d, 0xd2, 0xf6, 0x4b, - 0x73, 0x78, 0xa3, 0xaf, 0xc4, 0xbc, 0x57, 0xf6, 0x18, 0x03, 0x1e, 0x12, 0x29, 0xdf, 0xe6, 0xbd, - 0x60, 0xc1, 0x93, 0xef, 0xa2, 0x6b, 0x94, 0x5f, 0x31, 0x30, 0xd9, 0xfd, 0x89, 0x2f, 0xf4, 0x8d, - 0x98, 0xe1, 0x5a, 0x2b, 0x23, 0xc3, 0x02, 0x52, 0x7e, 0x13, 0x95, 0x7d, 0x2f, 0x04, 0x37, 0xb4, - 0x88, 0x9d, 0x5c, 0xe5, 0x42, 0x63, 0x59, 0x97, 0x59, 0x21, 0x34, 0x05, 0x09, 0xce, 0x21, 0x95, - 0xef, 0x25, 0xb4, 0x3e, 0x21, 0x03, 0xf2, 0x37, 0xa3, 0x02, 0xbb, 0x6f, 0x13, 0xcb, 0xa1, 0x15, - 0x89, 0xbb, 0xff, 0xd6, 0x6c, 0xf7, 0x71, 0x56, 0x67, 0x2c, 0xad, 0x63, 0xd5, 0x39, 0xa4, 0xc6, - 0xb9, 0xa3, 0x78, 0x21, 0x08, 0x90, 0xcb, 0x56, 0x08, 0xa2, 0x9b, 0xff, 0x51, 0x21, 0x88, 0xa4, - 0xb3, 0x0b, 0x61, 0x20, 0xa1, 0xaa, 0x80, 0xbf, 0xef, 0xb9, 0x34, 0x72, 0x20, 0xc0, 0xf0, 0x18, - 0x02, 0x70, 0x0d, 0x90, 0x5f, 0x46, 0x4b, 0xc4, 0xb7, 0x1e, 0x04, 0x5e, 0xe4, 0xc7, 0x77, 0x29, - 0xbd, 0xe5, 0x3b, 0xcd, 0x3d, 0xbe, 0x8f, 0x53, 0x04, 0x43, 0x27, 0x16, 0x71, 0x6b, 0x33, 0xe8, - 0xe4, 0x1c, 0x9c, 0x22, 0xe4, 0x3a, 0x2a, 0xba, 0xc4, 0x81, 0x4a, 0x91, 0x23, 0x53, 0xdf, 0xf7, - 0x89, 0x03, 0x98, 0x4b, 0x64, 0x1d, 0x15, 0x22, 0xcb, 0xac, 0x5c, 0xe1, 0x80, 0x3b, 0x31, 0xa0, - 0x70, 0xb0, 0xb7, 0xfb, 0xbc, 0x5f, 0x7b, 0x71, 0xda, 0x4b, 0x10, 0x3e, 0xf1, 0x81, 0xaa, 0x07, - 0x7b, 0xbb, 0x98, 0x29, 0xf3, 0x6a, 0x17, 0x9c, 0xbc, 0x74, 0xd5, 0x2e, 0x58, 0x37, 0xa5, 0xda, - 0x7f, 0x90, 0x50, 0x5d, 0xc0, 0x35, 0x49, 0x40, 0x1c, 0x08, 0x21, 0xa0, 0xe7, 0x4d, 0x56, 0x1d, - 0x15, 0x8f, 0x2c, 0xd7, 0xe4, 0x77, 0x35, 0x13, 0xfe, 0x0f, 0x2c, 0xd7, 0xc4, 0x5c, 0x92, 0x26, - 0xa8, 0x30, 0x2d, 0x41, 0xca, 0x53, 0x09, 0xdd, 0x9e, 0x59, 0xad, 0x29, 0x87, 0x34, 0x35, 0xc9, - 0xef, 0xa0, 0xeb, 0x91, 0x4b, 0x23, 0x2b, 0x64, 0xcf, 0x57, 0xb6, 0xf3, 0xac, 0x0f, 0xfa, 0xb5, - 0xeb, 0x07, 0xa2, 0x08, 0xe7, 0xb1, 0xca, 0x4f, 0x8b, 0xb9, 0xfc, 0xf2, 0x3e, 0xf8, 0x00, 0xad, - 0x65, 0xda, 0x01, 0xa5, 0xfb, 0x23, 0x1b, 0x6e, 0xc6, 0x36, 0x64, 0xb5, 0x86, 0x00, 0x3c, 0xae, - 0x23, 0x7f, 0x8d, 0x56, 0xfc, 0x6c, 0xa8, 0xe3, 0xd2, 0xbe, 0x37, 0x47, 0x4a, 0x27, 0xa4, 0x4a, - 0x5f, 0x1b, 0xf4, 0x6b, 0x2b, 0x82, 0x00, 0x8b, 0xe7, 0xc8, 0x4d, 0x54, 0x26, 0xe9, 0xc0, 0xf2, - 0x90, 0xf5, 0xf2, 0x61, 0x1a, 0x1a, 0x49, 0xfb, 0xdb, 0x11, 0xa4, 0xcf, 0xc7, 0x76, 0x70, 0x4e, - 0x5f, 0xf9, 0x6b, 0x11, 0xad, 0x4f, 0x68, 0x0f, 0xf2, 0x36, 0x42, 0x66, 0x60, 0xf5, 0x20, 0xc8, - 0x04, 0x29, 0x6d, 0x73, 0xbb, 0xa9, 0x04, 0x67, 0x50, 0xf2, 0xe7, 0x08, 0x8d, 0xd8, 0xe3, 0x98, - 0xa8, 0xb3, 0x63, 0x92, 0x1f, 0xbf, 0xf4, 0x32, 0xe3, 0xcf, 0xec, 0x66, 0x18, 0x65, 0x8a, 0x4a, - 0x01, 0x50, 0x08, 0x7a, 0x60, 0xbe, 0xe7, 0x05, 0x95, 0x02, 0xaf, 0xa3, 0xb7, 0xe7, 0x08, 0xfa, - 0x58, 0x2b, 0xd3, 0xd7, 0x63, 0x97, 0x4a, 0x78, 0x44, 0x8c, 0xb3, 0xa7, 0xc8, 0x2d, 0xb4, 0x61, - 0x02, 0xc9, 0x98, 0xf9, 0x55, 0x04, 0x34, 0x04, 0x93, 0x77, 0xa8, 0x25, 0xfd, 0x76, 
0x4c, 0xb0, - 0xb1, 0x3b, 0x09, 0x84, 0x27, 0xeb, 0x2a, 0xbf, 0x49, 0x68, 0x43, 0xb0, 0xec, 0x63, 0x70, 0x7c, - 0x9b, 0x84, 0x70, 0x01, 0xcf, 0xd1, 0xa1, 0xf0, 0x1c, 0xbd, 0x31, 0x47, 0xf8, 0x12, 0x23, 0xa7, - 0x3d, 0x4b, 0xca, 0xaf, 0x12, 0xba, 0x39, 0x51, 0xe3, 0x02, 0xda, 0xeb, 0x27, 0x62, 0x7b, 0x7d, - 0xf5, 0x1c, 0x7e, 0x4d, 0x69, 0xb3, 0x27, 0xd3, 0xbc, 0xe2, 0x4d, 0xe5, 0xff, 0x38, 0x3f, 0x28, - 0x7f, 0x8b, 0x63, 0x10, 0xa5, 0x17, 0xe0, 0x86, 0xd8, 0x51, 0x16, 0xcf, 0xd4, 0x51, 0xc6, 0x1a, - 0x6d, 0x61, 0xce, 0x46, 0x4b, 0xe9, 0xf9, 0x1a, 0xed, 0x21, 0x5a, 0x11, 0x5f, 0x9f, 0xe2, 0x19, - 0x3f, 0xe1, 0x38, 0x75, 0x4b, 0x78, 0x9d, 0x44, 0xa6, 0xfc, 0xec, 0x41, 0xe9, 0x65, 0x9e, 0x3d, - 0x28, 0x9d, 0x52, 0x14, 0xbf, 0x88, 0xb3, 0xc7, 0xc4, 0x38, 0x5f, 0xfc, 0xec, 0xc1, 0xbe, 0x8c, - 0xd9, 0x5f, 0xea, 0x13, 0x23, 0x99, 0x21, 0xd3, 0x2f, 0xe3, 0xfd, 0x44, 0x80, 0x47, 0x18, 0x5d, - 0x3f, 0x3e, 0xad, 0x2e, 0x9c, 0x9c, 0x56, 0x17, 0x9e, 0x9d, 0x56, 0x17, 0x9e, 0x0e, 0xaa, 0xd2, - 0xf1, 0xa0, 0x2a, 0x9d, 0x0c, 0xaa, 0xd2, 0xb3, 0x41, 0x55, 0xfa, 0x7d, 0x50, 0x95, 0xbe, 0xfb, - 0xa3, 0xba, 0xf0, 0xe9, 0xad, 0x59, 0xbf, 0xb3, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x0a, - 0x8b, 0x49, 0x9f, 0x11, 0x00, 0x00, + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/resource/v1alpha2/generated.proto", fileDescriptor_3add37bbd52889e0) +} + +var fileDescriptor_3add37bbd52889e0 = []byte{ + // 1233 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0x6e, 0x95, 0x4c, 0x1a, 0x37, 0xd9, 0xb6, 0xe0, 0x46, 0xad, 0x63, 0xf6, 0x14, + 0x89, 0xb2, 0xdb, 0x06, 0x54, 0x2a, 0xfe, 0x49, 0xd9, 0x06, 0x4a, 0x04, 0x4d, 0xc3, 0x98, 0x8a, + 0x16, 0x21, 0xd4, 0xc9, 0xee, 0xab, 0xbd, 0x64, 0xff, 0xb1, 0x33, 0x6b, 0xa8, 0xb8, 0xf4, 0x23, + 0xf4, 0xc0, 0x01, 0x4e, 0x1c, 0xf9, 0x02, 0x7c, 0x03, 0x84, 0xd4, 0x63, 0x11, 0x1c, 0x7a, 0xb2, + 0xa8, 0xf9, 0x08, 0x9c, 0xe8, 0x09, 0xcd, 0x78, 0x77, 0xbd, 0xb3, 0xf6, 0x9a, 0x38, 0x07, 0x0b, + 0x4e, 0xc9, 0xcc, 0xfb, 0xbd, 0xdf, 0xfb, 0x37, 0xef, 0xcd, 0xac, 0xd1, 0xbb, 0x87, 0xd7, 0xa8, + 0xee, 0x04, 0xc6, 0x61, 0x7c, 0x00, 0x91, 0x0f, 0x0c, 0xa8, 0xd1, 0x03, 0xdf, 0x0e, 0x22, 0x23, + 0x11, 0x90, 0xd0, 0x31, 0x22, 0xa0, 0x41, 0x1c, 0x59, 0x60, 0xf4, 0xae, 0x10, 0x37, 0xec, 0x92, + 0x2d, 0xa3, 0x03, 0x3e, 0x44, 0x84, 0x81, 0xad, 0x87, 0x51, 0xc0, 0x02, 0xf5, 0xc2, 0x10, 0xad, + 0x93, 0xd0, 0xd1, 0x53, 0xb4, 0x9e, 0xa2, 0xd7, 0x5f, 0xe9, 0x38, 0xac, 0x1b, 0x1f, 0xe8, 0x56, + 0xe0, 0x19, 0x9d, 0xa0, 0x13, 0x18, 0x42, 0xe9, 0x20, 0xbe, 0x2f, 0x56, 0x62, 0x21, 0xfe, 0x1b, + 0x92, 0xad, 0x6b, 0x39, 0xd3, 0x56, 0x10, 0x71, 0xb3, 0x45, 0x83, 0xeb, 0xaf, 0x8d, 0x30, 0x1e, + 0xb1, 0xba, 0x8e, 0x0f, 0xd1, 0x03, 0x23, 0x3c, 0xec, 0xf0, 0x0d, 0x6a, 0x78, 0xc0, 0xc8, 0x24, + 0x2d, 0xa3, 0x4c, 0x2b, 0x8a, 0x7d, 0xe6, 0x78, 0x30, 0xa6, 0x70, 0xf5, 0xdf, 0x14, 0xa8, 0xd5, + 0x05, 0x8f, 0x14, 0xf5, 0xb4, 0xef, 0x2a, 0x68, 0x75, 0xdb, 0x75, 0x03, 0x8b, 0x30, 0x27, 0xf0, + 0x31, 0xd0, 0xd8, 0x65, 0x6a, 0x80, 0x4e, 0xa7, 0xb9, 0x79, 0x9f, 0xf8, 0xb6, 0x0b, 0xb4, 0xa1, + 0xb4, 0xaa, 0x9b, 0xcb, 0x5b, 0x97, 0xf4, 0x69, 0xe9, 0xd3, 0xb1, 0xa4, 0x64, 0xbe, 0xf8, 0xb8, + 0xbf, 0xb1, 0x30, 0xe8, 0x6f, 0x9c, 0x96, 0xf7, 0x29, 0x2e, 0xb2, 0xab, 0x07, 0x68, 0x95, 0xf4, + 0x88, 0xe3, 0x92, 0x03, 0x17, 0x6e, 0xf9, 0x7b, 0x81, 0x0d, 0xb4, 0x51, 0x69, 0x29, 0x9b, 0xcb, + 0x5b, 0xad, 0xbc, 0x45, 0x9e, 0x63, 0xbd, 0x77, 0x45, 0xe7, 0x80, 0x36, 0xb8, 0x60, 0xb1, 0x20, + 0x32, 0xcf, 0x0e, 0xfa, 0x1b, 0xab, 
0xdb, 0x05, 0x6d, 0x3c, 0xc6, 0xa7, 0x1a, 0x68, 0x89, 0x76, + 0x49, 0x04, 0x7c, 0xaf, 0x51, 0x6d, 0x29, 0x9b, 0x8b, 0xe6, 0x5a, 0xe2, 0xe0, 0x52, 0x3b, 0x15, + 0xe0, 0x11, 0x46, 0xfb, 0xa9, 0x82, 0xce, 0xee, 0x07, 0x76, 0xdb, 0xea, 0x82, 0x1d, 0xbb, 0x8e, + 0xdf, 0xb9, 0x1e, 0xf8, 0x0c, 0xbe, 0x66, 0xea, 0x3d, 0xb4, 0xc8, 0xeb, 0x66, 0x13, 0x46, 0x1a, + 0x8a, 0xf0, 0xf2, 0x72, 0xce, 0xcb, 0x2c, 0xfd, 0x7a, 0x78, 0xd8, 0xe1, 0x1b, 0x54, 0xe7, 0x68, + 0xee, 0xf7, 0xad, 0x83, 0x2f, 0xc0, 0x62, 0x37, 0x81, 0x11, 0x53, 0x4d, 0x4c, 0xa3, 0xd1, 0x1e, + 0xce, 0x58, 0xd5, 0x3b, 0xa8, 0x46, 0x43, 0xb0, 0x92, 0x1c, 0x5c, 0x9d, 0x9e, 0xf5, 0x49, 0x3e, + 0xb6, 0x43, 0xb0, 0xcc, 0x53, 0x89, 0x8d, 0x1a, 0x5f, 0x61, 0xc1, 0xa8, 0xde, 0x43, 0x27, 0x29, + 0x23, 0x2c, 0xa6, 0x22, 0x05, 0xcb, 0x5b, 0xd7, 0x8e, 0xc1, 0x2d, 0xf4, 0xcd, 0x7a, 0xc2, 0x7e, + 0x72, 0xb8, 0xc6, 0x09, 0xaf, 0xf6, 0xab, 0x82, 0x1a, 0x93, 0xd4, 0x3e, 0x74, 0x28, 0x53, 0x3f, + 0x1b, 0x4b, 0x9d, 0x7e, 0xb4, 0xd4, 0x71, 0x6d, 0x91, 0xb8, 0xd5, 0xc4, 0xec, 0x62, 0xba, 0x93, + 0x4b, 0xdb, 0x27, 0xe8, 0x84, 0xc3, 0xc0, 0xe3, 0x67, 0x87, 0x9f, 0xd6, 0xad, 0xd9, 0x63, 0x33, + 0x57, 0x12, 0xfa, 0x13, 0xbb, 0x9c, 0x08, 0x0f, 0xf9, 0xb4, 0x47, 0x25, 0x31, 0xf1, 0xc4, 0xaa, + 0xd7, 0xd0, 0x29, 0x2a, 0x0e, 0x23, 0xd8, 0xfc, 0xa4, 0x89, 0xb8, 0x96, 0xcc, 0xb3, 0x09, 0xd1, + 0xa9, 0x76, 0x4e, 0x86, 0x25, 0xa4, 0xfa, 0x06, 0xaa, 0x87, 0x01, 0x03, 0x9f, 0x39, 0xc4, 0x4d, + 0x0f, 0x7d, 0x75, 0x73, 0xc9, 0x54, 0x07, 0xfd, 0x8d, 0xfa, 0xbe, 0x24, 0xc1, 0x05, 0xa4, 0xf6, + 0xbd, 0x82, 0xd6, 0xcb, 0xab, 0xa3, 0x7e, 0x83, 0xea, 0x69, 0xc4, 0xd7, 0x5d, 0xe2, 0x78, 0x69, + 0x07, 0xbf, 0x79, 0xb4, 0x0e, 0x16, 0x3a, 0x23, 0xee, 0xa4, 0xe4, 0x2f, 0x24, 0x31, 0xd5, 0x25, + 0x18, 0xc5, 0x05, 0x53, 0xda, 0x0f, 0x15, 0xb4, 0x22, 0x41, 0xe6, 0xd0, 0x32, 0x1f, 0x49, 0x2d, + 0x63, 0xcc, 0x12, 0x66, 0x59, 0xaf, 0xdc, 0x2d, 0xf4, 0xca, 0x95, 0x59, 0x48, 0xa7, 0x37, 0xc9, + 0x40, 0x41, 0x4d, 0x09, 0x7f, 0x3d, 0xf0, 0x69, 0xec, 0x41, 0x84, 0xe1, 0x3e, 0x44, 0xe0, 0x5b, + 0xa0, 0x5e, 0x42, 0x8b, 0x24, 0x74, 0x6e, 0x44, 0x41, 0x1c, 0x26, 0x47, 0x2a, 0x3b, 0xfa, 0xdb, + 0xfb, 0xbb, 0x62, 0x1f, 0x67, 0x08, 0x8e, 0x4e, 0x3d, 0x12, 0xde, 0xe6, 0xd0, 0xa9, 0x1d, 0x9c, + 0x21, 0xd4, 0x16, 0xaa, 0xf9, 0xc4, 0x83, 0x46, 0x4d, 0x20, 0xb3, 0xd8, 0xf7, 0x88, 0x07, 0x58, + 0x48, 0x54, 0x13, 0x55, 0x63, 0xc7, 0x6e, 0x9c, 0x10, 0x80, 0xcb, 0x09, 0xa0, 0x7a, 0x7b, 0x77, + 0xe7, 0x79, 0x7f, 0xe3, 0xa5, 0xb2, 0xbb, 0x86, 0x3d, 0x08, 0x81, 0xea, 0xb7, 0x77, 0x77, 0x30, + 0x57, 0xd6, 0x7e, 0x56, 0xd0, 0x9a, 0x14, 0xe4, 0x1c, 0x46, 0xc0, 0xbe, 0x3c, 0x02, 0x5e, 0x9e, + 0xa1, 0x64, 0x25, 0xbd, 0xff, 0xad, 0x82, 0x5a, 0x12, 0x6e, 0x9f, 0x44, 0xc4, 0x03, 0x06, 0x11, + 0x3d, 0x6e, 0xb1, 0x5a, 0xa8, 0x76, 0xe8, 0xf8, 0xb6, 0x38, 0xab, 0xb9, 0xf4, 0x7f, 0xe0, 0xf8, + 0x36, 0x16, 0x92, 0xac, 0x40, 0xd5, 0xb2, 0x02, 0x69, 0x0f, 0x15, 0x74, 0x71, 0x6a, 0xb7, 0x66, + 0x1c, 0x4a, 0x69, 0x91, 0xdf, 0x46, 0xa7, 0x63, 0x9f, 0xc6, 0x0e, 0xe3, 0xf7, 0x5d, 0x7e, 0x00, + 0x9d, 0xe1, 0xb7, 0xf6, 0x6d, 0x59, 0x84, 0x8b, 0x58, 0xed, 0xc7, 0x4a, 0xa1, 0xbe, 0x62, 0x1c, + 0xde, 0x40, 0x6b, 0xb9, 0x71, 0x40, 0xe9, 0xde, 0xc8, 0x87, 0xf3, 0x89, 0x0f, 0x79, 0xad, 0x21, + 0x00, 0x8f, 0xeb, 0xa8, 0x5f, 0xa1, 0x95, 0x30, 0x9f, 0xea, 0xa4, 0xb5, 0xdf, 0x99, 0xa1, 0xa4, + 0x13, 0x4a, 0x65, 0xae, 0x0d, 0xfa, 0x1b, 0x2b, 0x92, 0x00, 0xcb, 0x76, 0xd4, 0x7d, 0x54, 0x27, + 0xd9, 0x93, 0xe8, 0x26, 0x1f, 0xe9, 0xc3, 0x32, 0x6c, 0xa6, 0xe3, 0x6f, 0x5b, 0x92, 0x3e, 0x1f, + 0xdb, 0xc1, 0x05, 0x7d, 0xed, 0xaf, 0x0a, 0x3a, 0x33, 0x61, 
0x3c, 0xa8, 0x5b, 0x08, 0xd9, 0x91, + 0xd3, 0x83, 0x28, 0x97, 0xa4, 0x6c, 0xcc, 0xed, 0x64, 0x12, 0x9c, 0x43, 0xa9, 0x9f, 0x23, 0x34, + 0x62, 0x4f, 0x72, 0xa2, 0x4f, 0xcf, 0x49, 0xf1, 0x81, 0x67, 0xd6, 0x39, 0x7f, 0x6e, 0x37, 0xc7, + 0xa8, 0x52, 0xb4, 0x1c, 0x01, 0x85, 0xa8, 0x07, 0xf6, 0x7b, 0x41, 0xd4, 0xa8, 0x8a, 0x3e, 0x7a, + 0x6b, 0x86, 0xa4, 0x8f, 0x8d, 0x32, 0xf3, 0x4c, 0x12, 0xd2, 0x32, 0x1e, 0x11, 0xe3, 0xbc, 0x15, + 0xb5, 0x8d, 0xce, 0xd9, 0x40, 0x72, 0x6e, 0x7e, 0x19, 0x03, 0x65, 0x60, 0x8b, 0x09, 0xb5, 0x68, + 0x5e, 0x4c, 0x08, 0xce, 0xed, 0x4c, 0x02, 0xe1, 0xc9, 0xba, 0xda, 0xef, 0x0a, 0x3a, 0x27, 0x79, + 0xf6, 0x31, 0x78, 0xa1, 0x4b, 0x18, 0xcc, 0xe1, 0x3a, 0xba, 0x2b, 0x5d, 0x47, 0xaf, 0xcf, 0x90, + 0xbe, 0xd4, 0xc9, 0xb2, 0x6b, 0x49, 0xfb, 0x4d, 0x41, 0xe7, 0x27, 0x6a, 0xcc, 0x61, 0xbc, 0xde, + 0x91, 0xc7, 0xeb, 0xab, 0xc7, 0x88, 0xab, 0x64, 0xcc, 0x3e, 0x29, 0x8b, 0xaa, 0x3d, 0x7c, 0xb6, + 0xfe, 0xff, 0xde, 0x0f, 0xda, 0xdf, 0xf2, 0x33, 0x88, 0xd2, 0x39, 0x84, 0x21, 0x4f, 0x94, 0xca, + 0x91, 0x26, 0xca, 0xd8, 0xa0, 0xad, 0xce, 0x38, 0x68, 0x29, 0x3d, 0xde, 0xa0, 0xbd, 0x8b, 0x56, + 0xe4, 0xdb, 0xa7, 0x76, 0xc4, 0x6f, 0x3e, 0x41, 0xdd, 0x96, 0x6e, 0x27, 0x99, 0xa9, 0xf8, 0xf6, + 0xa0, 0xf4, 0xbf, 0xfc, 0xf6, 0xa0, 0xb4, 0xa4, 0x29, 0x7e, 0x91, 0xdf, 0x1e, 0x13, 0xf3, 0x3c, + 0xff, 0xb7, 0x07, 0xff, 0x94, 0xe6, 0x7f, 0x69, 0x48, 0xac, 0xf4, 0x0d, 0x99, 0x7d, 0x4a, 0xef, + 0xa5, 0x02, 0x3c, 0xc2, 0x68, 0xf7, 0x51, 0x5d, 0xfe, 0x0d, 0xe0, 0x58, 0x37, 0x5f, 0x0b, 0xd5, + 0x44, 0xe5, 0x0a, 0xae, 0xef, 0x10, 0x46, 0xb0, 0x90, 0x98, 0xe6, 0xe3, 0x67, 0xcd, 0x85, 0x27, + 0xcf, 0x9a, 0x0b, 0x4f, 0x9f, 0x35, 0x17, 0x1e, 0x0e, 0x9a, 0xca, 0xe3, 0x41, 0x53, 0x79, 0x32, + 0x68, 0x2a, 0x4f, 0x07, 0x4d, 0xe5, 0x8f, 0x41, 0x53, 0x79, 0xf4, 0x67, 0x73, 0xe1, 0xd3, 0x0b, + 0xd3, 0x7e, 0x31, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x67, 0xe4, 0xf6, 0x18, 0x69, 0x12, 0x00, + 0x00, } func (m *AllocationResult) Marshal() (dAtA []byte, err error) { @@ -693,15 +726,24 @@ func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - i -= len(m.ResourceHandle) - copy(dAtA[i:], m.ResourceHandle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceHandle))) - i-- - dAtA[i] = 0xa + if len(m.ResourceHandles) > 0 { + for iNdEx := len(m.ResourceHandles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceHandles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *PodScheduling) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -711,12 +753,12 @@ func (m *PodScheduling) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodScheduling) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -754,7 +796,7 @@ func (m *PodScheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingList) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -764,12 +806,12 @@ func (m *PodSchedulingList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingList) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -801,7 +843,7 @@ func (m *PodSchedulingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingSpec) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -811,12 +853,12 @@ func (m *PodSchedulingSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -838,7 +880,7 @@ func (m *PodSchedulingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSchedulingStatus) Marshal() (dAtA []byte, err error) { +func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -848,12 +890,12 @@ func (m *PodSchedulingStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1485,6 +1527,39 @@ func (m *ResourceClassParametersReference) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } +func (m *ResourceHandle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceHandle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -1502,8 +1577,12 @@ func (m *AllocationResult) Size() (n int) { } var l int _ = l - l = len(m.ResourceHandle) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ResourceHandles) > 0 { + for _, e := range 
m.ResourceHandles { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } if m.AvailableOnNodes != nil { l = m.AvailableOnNodes.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -1512,7 +1591,7 @@ func (m *AllocationResult) Size() (n int) { return n } -func (m *PodScheduling) Size() (n int) { +func (m *PodSchedulingContext) Size() (n int) { if m == nil { return 0 } @@ -1527,7 +1606,7 @@ func (m *PodScheduling) Size() (n int) { return n } -func (m *PodSchedulingList) Size() (n int) { +func (m *PodSchedulingContextList) Size() (n int) { if m == nil { return 0 } @@ -1544,7 +1623,7 @@ func (m *PodSchedulingList) Size() (n int) { return n } -func (m *PodSchedulingSpec) Size() (n int) { +func (m *PodSchedulingContextSpec) Size() (n int) { if m == nil { return 0 } @@ -1561,7 +1640,7 @@ func (m *PodSchedulingSpec) Size() (n int) { return n } -func (m *PodSchedulingStatus) Size() (n int) { +func (m *PodSchedulingContextStatus) Size() (n int) { if m == nil { return 0 } @@ -1794,6 +1873,19 @@ func (m *ResourceClassParametersReference) Size() (n int) { return n } +func (m *ResourceHandle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1804,54 +1896,59 @@ func (this *AllocationResult) String() string { if this == nil { return "nil" } + repeatedStringForResourceHandles := "[]ResourceHandle{" + for _, f := range this.ResourceHandles { + repeatedStringForResourceHandles += strings.Replace(strings.Replace(f.String(), "ResourceHandle", "ResourceHandle", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceHandles += "}" s := strings.Join([]string{`&AllocationResult{`, - `ResourceHandle:` + fmt.Sprintf("%v", this.ResourceHandle) + `,`, + `ResourceHandles:` + repeatedStringForResourceHandles + `,`, `AvailableOnNodes:` + strings.Replace(fmt.Sprintf("%v", this.AvailableOnNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, `Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`, `}`, }, "") return s } -func (this *PodScheduling) String() string { +func (this *PodSchedulingContext) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PodScheduling{`, + s := strings.Join([]string{`&PodSchedulingContext{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingSpec", "PodSchedulingSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingStatus", "PodSchedulingStatus", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *PodSchedulingList) String() string { +func (this *PodSchedulingContextList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]PodScheduling{" + repeatedStringForItems := "[]PodSchedulingContext{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodScheduling", "PodScheduling", 1), `&`, ``, 1) + "," + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSchedulingList{`, + s := strings.Join([]string{`&PodSchedulingContextList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *PodSchedulingSpec) String() string { +func (this *PodSchedulingContextSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&PodSchedulingSpec{`, + s := strings.Join([]string{`&PodSchedulingContextSpec{`, `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, `}`, }, "") return s } -func (this *PodSchedulingStatus) String() string { +func (this *PodSchedulingContextStatus) String() string { if this == nil { return "nil" } @@ -1860,7 +1957,7 @@ func (this *PodSchedulingStatus) String() string { repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," } repeatedStringForResourceClaims += "}" - s := strings.Join([]string{`&PodSchedulingStatus{`, + s := strings.Join([]string{`&PodSchedulingContextStatus{`, `ResourceClaims:` + repeatedStringForResourceClaims + `,`, `}`, }, "") @@ -2040,6 +2137,17 @@ func (this *ResourceClassParametersReference) String() string { }, "") return s } +func (this *ResourceHandle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceHandle{`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2079,9 +2187,9 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandles", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2091,23 +2199,25 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceHandle = string(dAtA[iNdEx:postIndex]) + m.ResourceHandles = append(m.ResourceHandles, ResourceHandle{}) + if err := m.ResourceHandles[len(m.ResourceHandles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { @@ -2186,7 +2296,7 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodScheduling) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2209,10 +2319,10 @@ func (m *PodScheduling) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: PodScheduling: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodScheduling: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2335,7 +2445,7 @@ func (m *PodScheduling) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2358,10 +2468,10 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingList: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2426,7 +2536,7 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, PodScheduling{}) + m.Items = append(m.Items, PodSchedulingContext{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2452,7 +2562,7 @@ func (m *PodSchedulingList) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2475,10 +2585,10 @@ func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingSpec: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2566,7 +2676,7 @@ func (m *PodSchedulingSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingStatus) Unmarshal(dAtA []byte) error { +func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2589,10 +2699,10 @@ func (m *PodSchedulingStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingStatus: wiretype end group for non-group") + return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4507,6 +4617,120 @@ func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceHandle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceHandle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/resource/v1alpha1/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto similarity index 79% rename from vendor/k8s.io/api/resource/v1alpha1/generated.proto rename to vendor/k8s.io/api/resource/v1alpha2/generated.proto index 2e814d155b..02412398c4 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.proto @@ -19,7 +19,7 @@ limitations under the License. syntax = "proto2"; -package k8s.io.api.resource.v1alpha1; +package k8s.io.api.resource.v1alpha2; import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; @@ -27,23 +27,30 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "k8s.io/api/resource/v1alpha1"; +option go_package = "k8s.io/api/resource/v1alpha2"; -// AllocationResult contains attributed of an allocated resource. +// AllocationResult contains attributes of an allocated resource. message AllocationResult { - // ResourceHandle contains arbitrary data returned by the driver after a - // successful allocation. 
This is opaque for - // Kubernetes. Driver documentation may explain to users how to - // interpret this data if needed. + // ResourceHandles contain the state associated with an allocation that + // should be maintained throughout the lifetime of a claim. Each + // ResourceHandle contains data that should be passed to a specific kubelet + // plugin once it lands on a node. This data is returned by the driver + // after a successful allocation and is opaque to Kubernetes. Driver + // documentation may explain to users how to interpret this data if needed. // - // The maximum size of this field is 16KiB. This may get - // increased in the future, but not reduced. + // Setting this field is optional. It has a maximum size of 32 entries. + // If null (or empty), it is assumed this allocation will be processed by a + // single kubelet plugin with no ResourceHandle data attached. The name of + // the kubelet plugin invoked will match the DriverName set in the + // ResourceClaimStatus this AllocationResult is embedded in. + // + // +listType=atomic // +optional - optional string resourceHandle = 1; + repeated ResourceHandle resourceHandles = 1; - // This field will get set by the resource driver after it has - // allocated the resource driver to inform the scheduler where it can - // schedule Pods using the ResourceClaim. + // This field will get set by the resource driver after it has allocated + // the resource to inform the scheduler where it can schedule Pods using + // the ResourceClaim. // // Setting this field is optional. If null, the resource is available // everywhere. @@ -56,37 +63,37 @@ message AllocationResult { optional bool shareable = 3; } -// PodScheduling objects hold information that is needed to schedule +// PodSchedulingContext objects hold information that is needed to schedule // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation // mode. // // This is an alpha type and requires enabling the DynamicResourceAllocation // feature gate. -message PodScheduling { +message PodSchedulingContext { // Standard object metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec describes where resources for the Pod are needed. - optional PodSchedulingSpec spec = 2; + optional PodSchedulingContextSpec spec = 2; // Status describes where resources for the Pod can be allocated. // +optional - optional PodSchedulingStatus status = 3; + optional PodSchedulingContextStatus status = 3; } -// PodSchedulingList is a collection of Pod scheduling objects. -message PodSchedulingList { +// PodSchedulingContextList is a collection of Pod scheduling objects. +message PodSchedulingContextList { // Standard list metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of PodScheduling objects. - repeated PodScheduling items = 2; + // Items is the list of PodSchedulingContext objects. + repeated PodSchedulingContext items = 2; } -// PodSchedulingSpec describes where resources for the Pod are needed. -message PodSchedulingSpec { +// PodSchedulingContextSpec describes where resources for the Pod are needed. +message PodSchedulingContextSpec { // SelectedNode is the node for which allocation of ResourceClaims that // are referenced by the Pod and that use "WaitForFirstConsumer" // allocation is to be attempted. @@ -105,8 +112,8 @@ message PodSchedulingSpec { repeated string potentialNodes = 2; } -// PodSchedulingStatus describes where resources for the Pod can be allocated. 
-message PodSchedulingStatus { +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +message PodSchedulingContextStatus { // ResourceClaims describes resource availability for each // pod.spec.resourceClaim entry where the corresponding ResourceClaim // uses "WaitForFirstConsumer" allocation mode. @@ -235,9 +242,9 @@ message ResourceClaimStatus { // +optional optional string driverName = 1; - // Allocation is set by the resource driver once a resource has been - // allocated successfully. If this is not specified, the resource is - // not yet allocated. + // Allocation is set by the resource driver once a resource or set of + // resources has been allocated successfully. If this is not specified, the + // resources have not been allocated yet. // +optional optional AllocationResult allocation = 2; @@ -370,3 +377,24 @@ message ResourceClassParametersReference { optional string namespace = 4; } +// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. +message ResourceHandle { + // DriverName specifies the name of the resource driver whose kubelet + // plugin should be invoked to process this ResourceHandle's data once it + // lands on a node. This may differ from the DriverName set in + // ResourceClaimStatus this ResourceHandle is embedded in. + optional string driverName = 1; + + // Data contains the opaque data associated with this ResourceHandle. It is + // set by the controller component of the resource driver whose name + // matches the DriverName set in the ResourceClaimStatus this + // ResourceHandle is embedded in. It is set at allocation time and is + // intended for processing by the kubelet plugin whose name matches + // the DriverName set in this ResourceHandle. + // + // The maximum size of this field is 16KiB. This may get increased in the + // future, but not reduced. + // +optional + optional string data = 2; +} + diff --git a/vendor/k8s.io/api/resource/v1alpha1/register.go b/vendor/k8s.io/api/resource/v1alpha2/register.go similarity index 95% rename from vendor/k8s.io/api/resource/v1alpha1/register.go rename to vendor/k8s.io/api/resource/v1alpha2/register.go index 8245b9aee5..6e0d7ceb98 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/register.go +++ b/vendor/k8s.io/api/resource/v1alpha2/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "resource.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -50,8 +50,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ResourceClaimList{}, &ResourceClaimTemplate{}, &ResourceClaimTemplateList{}, - &PodScheduling{}, - &PodSchedulingList{}, + &PodSchedulingContext{}, + &PodSchedulingContextList{}, ) // Add common types diff --git a/vendor/k8s.io/api/resource/v1alpha1/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go similarity index 81% rename from vendor/k8s.io/api/resource/v1alpha1/types.go rename to vendor/k8s.io/api/resource/v1alpha2/types.go index af57038403..21936bfe3d 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/types.go +++ b/vendor/k8s.io/api/resource/v1alpha2/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/api/core/v1" @@ -99,9 +99,9 @@ type ResourceClaimStatus struct { // +optional DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - // Allocation is set by the resource driver once a resource has been - // allocated successfully. If this is not specified, the resource is - // not yet allocated. + // Allocation is set by the resource driver once a resource or set of + // resources has been allocated successfully. If this is not specified, the + // resources have not been allocated yet. // +optional Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,2,opt,name=allocation"` @@ -133,21 +133,28 @@ type ResourceClaimStatus struct { // claim.status.reservedFor. const ResourceClaimReservedForMaxSize = 32 -// AllocationResult contains attributed of an allocated resource. +// AllocationResult contains attributes of an allocated resource. type AllocationResult struct { - // ResourceHandle contains arbitrary data returned by the driver after a - // successful allocation. This is opaque for - // Kubernetes. Driver documentation may explain to users how to - // interpret this data if needed. + // ResourceHandles contain the state associated with an allocation that + // should be maintained throughout the lifetime of a claim. Each + // ResourceHandle contains data that should be passed to a specific kubelet + // plugin once it lands on a node. This data is returned by the driver + // after a successful allocation and is opaque to Kubernetes. Driver + // documentation may explain to users how to interpret this data if needed. // - // The maximum size of this field is 16KiB. This may get - // increased in the future, but not reduced. + // Setting this field is optional. It has a maximum size of 32 entries. + // If null (or empty), it is assumed this allocation will be processed by a + // single kubelet plugin with no ResourceHandle data attached. The name of + // the kubelet plugin invoked will match the DriverName set in the + // ResourceClaimStatus this AllocationResult is embedded in. 
+ // + // +listType=atomic // +optional - ResourceHandle string `json:"resourceHandle,omitempty" protobuf:"bytes,1,opt,name=resourceHandle"` + ResourceHandles []ResourceHandle `json:"resourceHandles,omitempty" protobuf:"bytes,1,opt,name=resourceHandles"` - // This field will get set by the resource driver after it has - // allocated the resource driver to inform the scheduler where it can - // schedule Pods using the ResourceClaim. + // This field will get set by the resource driver after it has allocated + // the resource to inform the scheduler where it can schedule Pods using + // the ResourceClaim. // // Setting this field is optional. If null, the resource is available // everywhere. @@ -160,8 +167,33 @@ type AllocationResult struct { Shareable bool `json:"shareable,omitempty" protobuf:"varint,3,opt,name=shareable"` } -// ResourceHandleMaxSize is the maximum size of allocation.resourceHandle. -const ResourceHandleMaxSize = 16 * 1024 +// AllocationResultResourceHandlesMaxSize represents the maximum number of +// entries in allocation.resourceHandles. +const AllocationResultResourceHandlesMaxSize = 32 + +// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. +type ResourceHandle struct { + // DriverName specifies the name of the resource driver whose kubelet + // plugin should be invoked to process this ResourceHandle's data once it + // lands on a node. This may differ from the DriverName set in + // ResourceClaimStatus this ResourceHandle is embedded in. + DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` + + // Data contains the opaque data associated with this ResourceHandle. It is + // set by the controller component of the resource driver whose name + // matches the DriverName set in the ResourceClaimStatus this + // ResourceHandle is embedded in. It is set at allocation time and is + // intended for processing by the kubelet plugin whose name matches + // the DriverName set in this ResourceHandle. + // + // The maximum size of this field is 16KiB. This may get increased in the + // future, but not reduced. + // +optional + Data string `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` +} + +// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data. +const ResourceHandleDataMaxSize = 16 * 1024 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 @@ -181,28 +213,28 @@ type ResourceClaimList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// PodScheduling objects hold information that is needed to schedule +// PodSchedulingContext objects hold information that is needed to schedule // a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation // mode. // // This is an alpha type and requires enabling the DynamicResourceAllocation // feature gate. -type PodScheduling struct { +type PodSchedulingContext struct { metav1.TypeMeta `json:",inline"` // Standard object metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec describes where resources for the Pod are needed. - Spec PodSchedulingSpec `json:"spec" protobuf:"bytes,2,name=spec"` + Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` // Status describes where resources for the Pod can be allocated. 
// +optional - Status PodSchedulingStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// PodSchedulingSpec describes where resources for the Pod are needed. -type PodSchedulingSpec struct { +// PodSchedulingContextSpec describes where resources for the Pod are needed. +type PodSchedulingContextSpec struct { // SelectedNode is the node for which allocation of ResourceClaims that // are referenced by the Pod and that use "WaitForFirstConsumer" // allocation is to be attempted. @@ -221,8 +253,8 @@ type PodSchedulingSpec struct { PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` } -// PodSchedulingStatus describes where resources for the Pod can be allocated. -type PodSchedulingStatus struct { +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +type PodSchedulingContextStatus struct { // ResourceClaims describes resource availability for each // pod.spec.resourceClaim entry where the corresponding ResourceClaim // uses "WaitForFirstConsumer" allocation mode. @@ -257,22 +289,22 @@ type ResourceClaimSchedulingStatus struct { } // PodSchedulingNodeListMaxSize defines the maximum number of entries in the -// node lists that are stored in PodScheduling objects. This limit is part +// node lists that are stored in PodSchedulingContext objects. This limit is part // of the API. const PodSchedulingNodeListMaxSize = 128 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// PodSchedulingList is a collection of Pod scheduling objects. -type PodSchedulingList struct { +// PodSchedulingContextList is a collection of Pod scheduling objects. +type PodSchedulingContextList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of PodScheduling objects. - Items []PodScheduling `json:"items" protobuf:"bytes,2,rep,name=items"` + // Items is the list of PodSchedulingContext objects. + Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient diff --git a/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go similarity index 76% rename from vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go index 6836dbfb6e..474be8c85c 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1alpha2 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more @@ -24,13 +24,13 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
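For readers of this vendor bump, the key change in `k8s.io/api/resource` is the move from `v1alpha1` to `v1alpha2`: `PodScheduling` and its companions are renamed to `PodSchedulingContext*`, and `AllocationResult` now carries a list of `ResourceHandle` entries instead of a single opaque string. The sketch below is illustrative only and is not part of the vendored diff; it assumes the `k8s.io/api/resource/v1alpha2` and `k8s.io/apimachinery` modules pulled in by this change, and the driver name and handle data are made-up placeholders.

```go
// Illustrative sketch of the renamed resource.k8s.io/v1alpha2 types.
package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// AllocationResult now holds a list of ResourceHandles rather than the
	// single opaque ResourceHandle string used by v1alpha1.
	alloc := resourcev1alpha2.AllocationResult{
		ResourceHandles: []resourcev1alpha2.ResourceHandle{
			{DriverName: "gpu.example.com", Data: `{"opaque":"driver-specific"}`},
		},
		Shareable: true,
	}

	// PodScheduling was renamed to PodSchedulingContext, along with its
	// Spec, Status, and List companions.
	schedCtx := resourcev1alpha2.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pod", Namespace: "default"},
		Spec: resourcev1alpha2.PodSchedulingContextSpec{
			SelectedNode:   "node-a",
			PotentialNodes: []string{"node-a", "node-b"},
		},
	}

	fmt.Printf("%d handle(s), selected node %q\n",
		len(alloc.ResourceHandles), schedCtx.Spec.SelectedNode)
}
```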
var map_AllocationResult = map[string]string{ - "": "AllocationResult contains attributed of an allocated resource.", - "resourceHandle": "ResourceHandle contains arbitrary data returned by the driver after a successful allocation. This is opaque for Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", - "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource driver to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere.", + "": "AllocationResult contains attributes of an allocated resource.", + "resourceHandles": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", + "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. 
If null, the resource is available everywhere.", "shareable": "Shareable determines whether the resource supports more than one consumer at a time.", } @@ -38,44 +38,44 @@ func (AllocationResult) SwaggerDoc() map[string]string { return map_AllocationResult } -var map_PodScheduling = map[string]string{ - "": "PodScheduling objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", +var map_PodSchedulingContext = map[string]string{ + "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "metadata": "Standard object metadata", "spec": "Spec describes where resources for the Pod are needed.", "status": "Status describes where resources for the Pod can be allocated.", } -func (PodScheduling) SwaggerDoc() map[string]string { - return map_PodScheduling +func (PodSchedulingContext) SwaggerDoc() map[string]string { + return map_PodSchedulingContext } -var map_PodSchedulingList = map[string]string{ - "": "PodSchedulingList is a collection of Pod scheduling objects.", +var map_PodSchedulingContextList = map[string]string{ + "": "PodSchedulingContextList is a collection of Pod scheduling objects.", "metadata": "Standard list metadata", - "items": "Items is the list of PodScheduling objects.", + "items": "Items is the list of PodSchedulingContext objects.", } -func (PodSchedulingList) SwaggerDoc() map[string]string { - return map_PodSchedulingList +func (PodSchedulingContextList) SwaggerDoc() map[string]string { + return map_PodSchedulingContextList } -var map_PodSchedulingSpec = map[string]string{ - "": "PodSchedulingSpec describes where resources for the Pod are needed.", +var map_PodSchedulingContextSpec = map[string]string{ + "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. 
This may get increased in the future, but not reduced.", } -func (PodSchedulingSpec) SwaggerDoc() map[string]string { - return map_PodSchedulingSpec +func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { + return map_PodSchedulingContextSpec } -var map_PodSchedulingStatus = map[string]string{ - "": "PodSchedulingStatus describes where resources for the Pod can be allocated.", +var map_PodSchedulingContextStatus = map[string]string{ + "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", } -func (PodSchedulingStatus) SwaggerDoc() map[string]string { - return map_PodSchedulingStatus +func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { + return map_PodSchedulingContextStatus } var map_ResourceClaim = map[string]string{ @@ -146,7 +146,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { var map_ResourceClaimStatus = map[string]string{ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", "driverName": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", - "allocation": "Allocation is set by the resource driver once a resource has been allocated successfully. If this is not specified, the resource is not yet allocated.", + "allocation": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet.", "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", "deallocationRequested": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", } @@ -219,4 +219,14 @@ func (ResourceClassParametersReference) SwaggerDoc() map[string]string { return map_ResourceClassParametersReference } +var map_ResourceHandle = map[string]string{ + "": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", + "driverName": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", + "data": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. 
This may get increased in the future, but not reduced.", +} + +func (ResourceHandle) SwaggerDoc() map[string]string { + return map_ResourceHandle +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go similarity index 88% rename from vendor/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go index c00fbfd1d4..89d521bf05 100644 --- a/vendor/k8s.io/api/resource/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/api/core/v1" @@ -29,6 +29,11 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { *out = *in + if in.ResourceHandles != nil { + in, out := &in.ResourceHandles, &out.ResourceHandles + *out = make([]ResourceHandle, len(*in)) + copy(*out, *in) + } if in.AvailableOnNodes != nil { in, out := &in.AvailableOnNodes, &out.AvailableOnNodes *out = new(v1.NodeSelector) @@ -48,7 +53,7 @@ func (in *AllocationResult) DeepCopy() *AllocationResult { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodScheduling) DeepCopyInto(out *PodScheduling) { +func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -57,18 +62,18 @@ func (in *PodScheduling) DeepCopyInto(out *PodScheduling) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodScheduling. -func (in *PodScheduling) DeepCopy() *PodScheduling { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. +func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { if in == nil { return nil } - out := new(PodScheduling) + out := new(PodSchedulingContext) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodScheduling) DeepCopyObject() runtime.Object { +func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -76,13 +81,13 @@ func (in *PodScheduling) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) { +func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]PodScheduling, len(*in)) + *out = make([]PodSchedulingContext, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -90,18 +95,18 @@ func (in *PodSchedulingList) DeepCopyInto(out *PodSchedulingList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingList. 
-func (in *PodSchedulingList) DeepCopy() *PodSchedulingList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. +func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { if in == nil { return nil } - out := new(PodSchedulingList) + out := new(PodSchedulingContextList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingList) DeepCopyObject() runtime.Object { +func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -109,7 +114,7 @@ func (in *PodSchedulingList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) { +func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { *out = *in if in.PotentialNodes != nil { in, out := &in.PotentialNodes, &out.PotentialNodes @@ -119,18 +124,18 @@ func (in *PodSchedulingSpec) DeepCopyInto(out *PodSchedulingSpec) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingSpec. -func (in *PodSchedulingSpec) DeepCopy() *PodSchedulingSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. +func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { if in == nil { return nil } - out := new(PodSchedulingSpec) + out := new(PodSchedulingContextSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) { +func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { *out = *in if in.ResourceClaims != nil { in, out := &in.ResourceClaims, &out.ResourceClaims @@ -142,12 +147,12 @@ func (in *PodSchedulingStatus) DeepCopyInto(out *PodSchedulingStatus) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingStatus. -func (in *PodSchedulingStatus) DeepCopy() *PodSchedulingStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. +func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { if in == nil { return nil } - out := new(PodSchedulingStatus) + out := new(PodSchedulingContextStatus) in.DeepCopyInto(out) return out } @@ -475,3 +480,19 @@ func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersR in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHandle. 
+func (in *ResourceHandle) DeepCopy() *ResourceHandle { + if in == nil { + return nil + } + out := new(ResourceHandle) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index afc090777d..c1a27e8baa 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -37,7 +37,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -54,7 +54,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index 0f2989424e..146bae40d3 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -34,7 +34,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -51,7 +51,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go index ac34c531fb..f167e19707 100644 --- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. 
This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 5c60b7ab4c..f0878fb16e 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 7b0df48646..26ba8ff5dc 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -35,7 +35,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -52,7 +52,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
// +optional diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index fa25f969c4..557005db64 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 44b49ea246..43878184d6 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -38,7 +38,7 @@ message PriorityClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,7 +55,7 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
// +optional diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index e315e1b359..6f88592cf2 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -39,7 +39,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -56,7 +56,7 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. // +optional diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index cbc140f446..f42008eb91 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index d3c425c041..5f8eccaefc 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -46,7 +46,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -79,16 +79,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name @@ -110,29 +109,27 @@ message CSIDriverSpec { optional bool podInfoOnMount = 2; // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. 
// // +optional // +listType=set repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +146,7 @@ message CSIDriverSpec { // +featureGate=CSIStorageCapacity optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +156,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +180,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -193,7 +191,7 @@ message CSIDriverSpec { // +optional optional bool requiresRepublish = 7; - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -211,6 +209,7 @@ message CSIDriverSpec { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional optional bool seLinuxMount = 8; } @@ -225,6 +224,7 @@ message CSIDriverSpec { // enough that it doesn't create this object. // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { + // Standard object's metadata. // metadata.name must be the Kubernetes node name. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -234,7 +234,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -314,11 +314,11 @@ message CSINodeSpec { // the scheduler assumes that capacity is insufficient and tries some other // node. message CSIStorageCapacity { - // Standard object's metadata. 
The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -326,7 +326,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -335,7 +335,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -343,7 +343,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -355,7 +355,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -377,7 +377,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -394,36 +394,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. 
// +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -439,17 +439,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -466,11 +466,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -484,7 +484,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -493,7 +493,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. 
// +optional optional string persistentVolumeName = 1; @@ -509,39 +509,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -550,11 +550,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -563,7 +563,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. 
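
As an illustrative aside (not part of the vendored patch above): the comment churn in this file documents the `storage/v1` Go types that this repository vendors. The sketch below shows how a few of the documented fields fit together when building objects in memory, assuming the vendored `k8s.io/api` and `k8s.io/apimachinery` modules from this repo. The driver name `example.csi.driver.io`, the class name `example-fast`, and the capacity values are placeholders chosen for the example, not anything defined by Shipwright or upstream.

```go
// Minimal sketch: constructs storage/v1 objects whose field documentation is
// touched by this diff. Names and values are illustrative placeholders.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	attachRequired := false
	podInfoOnMount := true
	storageCapacity := true
	fsGroupPolicy := storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy
	reclaimPolicy := corev1.PersistentVolumeReclaimDelete
	bindingMode := storagev1.VolumeBindingWaitForFirstConsumer
	allowExpansion := true

	// CSIDriver: attach is skipped, pod info is passed as VolumeContext on
	// NodePublishVolume, and the scheduler may consult CSIStorageCapacity
	// objects because storageCapacity is true.
	driver := storagev1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: "example.csi.driver.io"},
		Spec: storagev1.CSIDriverSpec{
			AttachRequired:  &attachRequired,
			PodInfoOnMount:  &podInfoOnMount,
			StorageCapacity: &storageCapacity,
			FSGroupPolicy:   &fsGroupPolicy,
			VolumeLifecycleModes: []storagev1.VolumeLifecycleMode{
				storagev1.VolumeLifecyclePersistent,
			},
		},
	}

	// StorageClass: reclaimPolicy defaults to Delete when unset; it is set
	// explicitly here only to show the field and its type.
	class := storagev1.StorageClass{
		ObjectMeta:           metav1.ObjectMeta{Name: "example-fast"},
		Provisioner:          "example.csi.driver.io",
		Parameters:           map[string]string{"type": "ssd"},
		ReclaimPolicy:        &reclaimPolicy,
		MountOptions:         []string{"ro", "soft"},
		AllowVolumeExpansion: &allowExpansion,
		VolumeBindingMode:    &bindingMode,
	}

	// CSIStorageCapacity: follows the "csisc-" generated-name recommendation
	// from the comments; reports capacity for one topology segment.
	capacity := resource.MustParse("1234Gi")
	segment := storagev1.CSIStorageCapacity{
		ObjectMeta:       metav1.ObjectMeta{GenerateName: "csisc-", Namespace: "default"},
		StorageClassName: "example-fast",
		NodeTopology: &metav1.LabelSelector{
			MatchLabels: map[string]string{"topology.kubernetes.io/zone": "us-east1"},
		},
		Capacity: &capacity,
	}

	fmt.Println(driver.Name, class.Provisioner, segment.StorageClassName)
}
```

Note that the optional booleans and enums (`attachRequired`, `podInfoOnMount`, `reclaimPolicy`, and so on) are pointer fields, which is why the example declares local variables and takes their addresses; a nil pointer means "unset, use the documented default" rather than false or empty.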
diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index f57099df6d..c785f368ef 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -33,41 +33,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -81,12 +82,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. 
type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -122,11 +124,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -138,25 +140,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -165,7 +168,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -181,26 +184,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -209,11 +212,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -242,7 +245,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -279,16 +282,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -310,29 +312,27 @@ type CSIDriverSpec struct { PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. // // +optional // +listType=set VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -349,7 +349,7 @@ type CSIDriverSpec struct { // +featureGate=CSIStorageCapacity StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -359,10 +359,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
// The CSI driver should parse and validate the following VolumeContext: @@ -382,7 +383,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -393,7 +394,7 @@ type CSIDriverSpec struct { // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -411,6 +412,7 @@ type CSIDriverSpec struct { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } @@ -453,12 +455,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -502,6 +503,7 @@ const ( type CSINode struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // metadata.name must be the Kubernetes node name. metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -520,7 +522,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -557,7 +559,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. @@ -609,11 +611,12 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` - // Standard object's metadata. The name has no particular meaning. It must be - // be a DNS subdomain (dots allowed, 253 characters). 
To ensure that - // there are no conflicts with other CSI drivers on the cluster, the recommendation - // is to use csisc-, a generated name, or a reverse-domain name which ends - // with the unique CSI driver name. + + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. // // Objects are namespaced. // @@ -621,7 +624,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -630,7 +633,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -638,7 +641,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -650,7 +653,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -670,12 +673,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 1a069bb403..c92a7f95a2 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. 
// Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,13 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. 
New mount points will not be seen by a running container.", - "seLinuxMount": "SELinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. 
This field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -65,7 +65,7 @@ func (CSIDriverSpec) SwaggerDoc() map[string]string { var map_CSINode = map[string]string{ "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. 
As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", - "metadata": "metadata.name must be the Kubernetes node name.", + "metadata": "Standard object's metadata. metadata.name must be the Kubernetes node name.", "spec": "spec is the specification of CSINode", } @@ -75,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", @@ -106,11 +106,11 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. 
This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", - "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "metadata": "Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). 
To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -120,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -130,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. 
Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand.", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -155,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -166,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -177,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -186,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -195,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -206,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -218,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -228,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index a534512260..88250a0f01 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -67,7 +67,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -76,7 +76,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -84,7 +84,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -96,7 +96,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -118,7 +118,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -134,11 +134,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -152,7 +152,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -161,7 +161,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. 
+ // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -177,39 +177,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -218,11 +218,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index fe8c9e3cd0..59ef348a31 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -41,11 +41,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. 
+ // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -60,25 +60,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -87,7 +88,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -103,26 +104,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. 
+ // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -131,11 +132,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional @@ -174,6 +175,7 @@ type VolumeError struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -186,7 +188,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -195,7 +197,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -203,7 +205,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -215,7 +217,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -238,12 +240,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. 
+ // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index a228a3fec7..ba6afbd591 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -43,7 +43,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -53,8 +53,8 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. 
Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -64,7 +64,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -73,7 +73,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -93,10 +93,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. 
the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -105,8 +105,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index bedbd31838..2b354dd471 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -49,7 +49,7 @@ message CSIDriver { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -82,16 +82,15 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -112,14 +111,14 @@ message CSIDriverSpec { // +optional optional bool podInfoOnMount = 2; - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -130,10 +129,9 @@ message CSIDriverSpec { // +optional repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -149,7 +147,7 @@ message CSIDriverSpec { // +optional optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -159,10 +157,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -182,7 +181,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. 
// @@ -193,7 +192,7 @@ message CSIDriverSpec { // +optional optional bool requiresRepublish = 7; - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -211,6 +210,7 @@ message CSIDriverSpec { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional optional bool seLinuxMount = 8; } @@ -236,7 +236,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -327,7 +327,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -336,7 +336,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -344,7 +344,7 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -356,7 +356,7 @@ message CSIStorageCapacity { // +optional optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -378,7 +378,7 @@ message CSIStorageCapacityList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name repeated CSIStorageCapacity items = 2; @@ -395,36 +395,36 @@ message StorageClass { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. 
// +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -440,17 +440,17 @@ message StorageClassList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -467,11 +467,11 @@ message VolumeAttachment { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -485,7 +485,7 @@ message VolumeAttachmentList { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -494,7 +494,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. 
message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -510,39 +510,39 @@ message VolumeAttachmentSource { // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -551,11 +551,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -564,7 +564,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. 
// If this field is nil, then the supported number of volumes on this node is unbounded. diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index f4d09b641a..4c39b49ccd 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -36,41 +36,42 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -87,12 +88,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. 
type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -130,11 +132,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -149,25 +151,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -176,7 +179,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -192,26 +195,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -220,11 +223,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -259,7 +262,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -299,16 +302,15 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name @@ -329,14 +331,14 @@ type CSIDriverSpec struct { // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -347,11 +349,9 @@ type CSIDriverSpec struct { // +optional VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. - // + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -367,7 +367,7 @@ type CSIDriverSpec struct { // +optional StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -377,10 +377,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
// The CSI driver should parse and validate the following VolumeContext: @@ -400,7 +401,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -411,7 +412,7 @@ type CSIDriverSpec struct { // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` - // SELinuxMount specifies if the CSI driver supports "-o context" + // seLinuxMount specifies if the CSI driver supports "-o context" // mount option. // // When "true", the CSI driver must ensure that all volumes provided by this CSI @@ -429,6 +430,7 @@ type CSIDriverSpec struct { // // Default is "false". // + // +featureGate=SELinuxMountReadWriteOncePod // +optional SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } @@ -466,12 +468,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -539,7 +540,7 @@ type CSINodeSpec struct { // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -575,7 +576,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is nil, then the supported number of volumes on this node is unbounded. @@ -634,6 +635,7 @@ type CSINodeList struct { // node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). 
To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -646,7 +648,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -655,7 +657,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -663,7 +665,7 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -675,7 +677,7 @@ type CSIStorageCapacity struct { // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -698,12 +700,13 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. + // items is the list of CSIStorageCapacity objects. // +listType=map // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index ea3c1e4c28..0f2718b9c1 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. 
Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,13 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. 
The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", - "seLinuxMount": "SELinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. 
In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. 
Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -75,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". 
This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", @@ -107,10 +107,10 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { var map_CSIStorageCapacity = map[string]string{ "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. 
This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -120,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -130,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -155,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -166,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -177,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -186,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -195,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -206,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -218,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -228,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is nil, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index f402c416d6..04ce206bb9 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -196,6 +196,18 @@ type ValidationRule struct { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" Message string + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + MessageExpression string } // JSON represents any valid JSON value. 
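The hunk above pulls in the new `messageExpression` field on CEL validation rules. As a minimal illustrative sketch (not part of this diff), the snippet below shows how `message` and `messageExpression` could be set side by side on a CRD schema, assuming the vendored `apiextensions/v1` package exposes the same `MessageExpression` field as the internal type changed here; the `replicas`/`max` property names are hypothetical.

```go
package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

func main() {
	// Hypothetical schema fragment: a CEL rule with both a static message and
	// a messageExpression that builds the failure message from the object.
	schema := apiextensionsv1.JSONSchemaProps{
		Type: "object",
		Properties: map[string]apiextensionsv1.JSONSchemaProps{
			"replicas": {Type: "integer"},
			"max":      {Type: "integer"},
		},
		XValidations: apiextensionsv1.ValidationRules{
			{
				// Evaluated with CEL; "self" is the object being validated.
				Rule: "self.replicas <= self.max",
				// Static fallback message, used if messageExpression is unset
				// or fails to produce a usable string.
				Message: "replicas must not exceed max",
				// Dynamic message, mirroring the example in the vendored comment.
				MessageExpression: `"replicas must be less than max (" + string(self.max) + ")"`,
			},
		},
	}

	fmt.Println(schema.XValidations[0].MessageExpression)
}
```

Per the vendored comment, if the expression errors at runtime or yields an empty/multi-line string, the API server falls back to the static `message` as if `messageExpression` were unset.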
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go index 68eb08082d..5dbb38c8bf 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go @@ -814,198 +814,199 @@ func init() { } var fileDescriptor_f5a35c9667703937 = []byte{ - // 3047 bytes of a gzipped FileDescriptorProto + // 3072 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdf, 0x6f, 0x24, 0x47, 0xf1, 0xbf, 0x59, 0xff, 0x5a, 0xb7, 0xed, 0xb3, 0xdd, 0x77, 0xf6, 0x77, 0xce, 0xb9, 0xf3, 0xfa, 0x36, 0xdf, 0x1c, 0x4e, 0x72, 0x59, 0x27, 0x26, 0x21, 0x47, 0x84, 0x40, 0x5e, 0xdb, 0x97, 0x38, - 0x67, 0x9f, 0xad, 0xde, 0xbb, 0x8b, 0x93, 0x00, 0xc9, 0x78, 0xa7, 0xbd, 0x9e, 0x78, 0x7e, 0x5d, - 0xf7, 0xcc, 0xda, 0x96, 0x40, 0x8a, 0x40, 0x11, 0x10, 0x09, 0xc2, 0x03, 0x0a, 0x4f, 0x08, 0x21, - 0x94, 0x07, 0x78, 0x80, 0x37, 0xf8, 0x17, 0xf2, 0x82, 0x94, 0x27, 0x14, 0x09, 0x69, 0x45, 0x96, - 0x7f, 0x00, 0x09, 0x10, 0xc2, 0x0f, 0x08, 0xf5, 0x8f, 0xe9, 0xe9, 0x9d, 0xdd, 0xbd, 0x3b, 0xd9, - 0xeb, 0xe4, 0xcd, 0xae, 0xaa, 0xae, 0x4f, 0x75, 0x75, 0x75, 0x55, 0x75, 0xcd, 0x02, 0x6b, 0xff, - 0x06, 0x2d, 0x39, 0xc1, 0xc2, 0x7e, 0xbc, 0x83, 0x89, 0x8f, 0x23, 0x4c, 0x17, 0xea, 0xd8, 0xb7, - 0x03, 0xb2, 0x20, 0x19, 0x56, 0xe8, 0xe0, 0xc3, 0x08, 0xfb, 0xd4, 0x09, 0x7c, 0xfa, 0x8c, 0x15, - 0x3a, 0x14, 0x93, 0x3a, 0x26, 0x0b, 0xe1, 0x7e, 0x8d, 0xf1, 0x68, 0xab, 0xc0, 0x42, 0xfd, 0xb9, - 0x85, 0x1a, 0xf6, 0x31, 0xb1, 0x22, 0x6c, 0x97, 0x42, 0x12, 0x44, 0x01, 0xbc, 0x21, 0x34, 0x95, - 0x5a, 0x04, 0xdf, 0x52, 0x9a, 0x4a, 0xe1, 0x7e, 0x8d, 0xf1, 0x68, 0xab, 0x40, 0xa9, 0xfe, 0xdc, - 0xcc, 0x33, 0x35, 0x27, 0xda, 0x8b, 0x77, 0x4a, 0xd5, 0xc0, 0x5b, 0xa8, 0x05, 0xb5, 0x60, 0x81, - 0x2b, 0xdc, 0x89, 0x77, 0xf9, 0x7f, 0xfc, 0x1f, 0xfe, 0x97, 0x00, 0x9a, 0x79, 0x3e, 0x35, 0xd9, + 0x67, 0x9f, 0xad, 0xde, 0xbb, 0x8b, 0x93, 0x20, 0x92, 0xf1, 0x4e, 0x7b, 0x3d, 0xf1, 0xfc, 0xba, + 0xee, 0x99, 0xb5, 0x2d, 0x81, 0x14, 0x81, 0x22, 0x20, 0x12, 0x84, 0x07, 0x14, 0x9e, 0x10, 0x42, + 0x28, 0x48, 0xf0, 0x00, 0x6f, 0xf0, 0x2f, 0xe4, 0x05, 0x29, 0x4f, 0x28, 0x12, 0xd2, 0x8a, 0x2c, + 0xff, 0x00, 0x12, 0x20, 0x84, 0x1f, 0x10, 0xea, 0x1f, 0xd3, 0xd3, 0x3b, 0xbb, 0x7b, 0x77, 0xb2, + 0xd7, 0xc9, 0xdb, 0x6e, 0x55, 0x75, 0x7d, 0xaa, 0xab, 0xab, 0xab, 0xab, 0xab, 0x07, 0x58, 0xfb, + 0x37, 0x68, 0xc9, 0x09, 0x16, 0xf6, 0xe3, 0x1d, 0x4c, 0x7c, 0x1c, 0x61, 0xba, 0x50, 0xc7, 0xbe, + 0x1d, 0x90, 0x05, 0xc9, 0xb0, 0x42, 0x07, 0x1f, 0x46, 0xd8, 0xa7, 0x4e, 0xe0, 0xd3, 0x67, 0xac, + 0xd0, 0xa1, 0x98, 0xd4, 0x31, 0x59, 0x08, 0xf7, 0x6b, 0x8c, 0x47, 0x5b, 0x05, 0x16, 0xea, 0xcf, + 0x2d, 0xd4, 0xb0, 0x8f, 0x89, 0x15, 0x61, 0xbb, 0x14, 0x92, 0x20, 0x0a, 0xe0, 0x0d, 0xa1, 0xa9, + 0xd4, 0x22, 0xf8, 0x96, 0xd2, 0x54, 0x0a, 0xf7, 0x6b, 0x8c, 0x47, 0x5b, 0x05, 0x4a, 0xf5, 0xe7, + 0x66, 0x9e, 0xa9, 0x39, 0xd1, 0x5e, 0xbc, 0x53, 0xaa, 0x06, 0xde, 0x42, 0x2d, 0xa8, 0x05, 0x0b, + 0x5c, 0xe1, 0x4e, 0xbc, 0xcb, 0xff, 0xf1, 0x3f, 0xfc, 0x97, 0x00, 0x9a, 0x79, 0x3e, 0x35, 0xd9, 0xb3, 0xaa, 0x7b, 0x8e, 0x8f, 0xc9, 0x51, 0x6a, 0xa7, 0x87, 0x23, 0xab, 0x83, 0x79, 0x33, 0x0b, - 0xdd, 0x56, 0x91, 0xd8, 0x8f, 0x1c, 0x0f, 0xb7, 0x2d, 0xf8, 0xca, 0xc3, 0x16, 0xd0, 0xea, 0x1e, - 0xf6, 0xac, 0xec, 0xba, 0xe2, 0xb1, 0x01, 0x26, 0x97, 0x03, 0xbf, 0x8e, 0x09, 0xdb, 0x20, 0xc2, + 0xdd, 0x46, 0x91, 0xd8, 0x8f, 0x1c, 0x0f, 0xb7, 0x0d, 
0xf8, 0xca, 0xc3, 0x06, 0xd0, 0xea, 0x1e, + 0xf6, 0xac, 0xec, 0xb8, 0xe2, 0xb1, 0x01, 0x26, 0x97, 0x03, 0xbf, 0x8e, 0x09, 0x9b, 0x20, 0xc2, 0xf7, 0x63, 0x4c, 0x23, 0x58, 0x06, 0x7d, 0xb1, 0x63, 0x9b, 0xc6, 0x9c, 0x31, 0x3f, 0x5c, 0x7e, 0xf6, 0xe3, 0x46, 0xe1, 0x5c, 0xb3, 0x51, 0xe8, 0xbb, 0xbb, 0xb6, 0x72, 0xdc, 0x28, 0x5c, 0xed, - 0x86, 0x14, 0x1d, 0x85, 0x98, 0x96, 0xee, 0xae, 0xad, 0x20, 0xb6, 0x18, 0xbe, 0x0c, 0x26, 0x6d, + 0x86, 0x14, 0x1d, 0x85, 0x98, 0x96, 0xee, 0xae, 0xad, 0x20, 0x36, 0x18, 0xbe, 0x0c, 0x26, 0x6d, 0x4c, 0x1d, 0x82, 0xed, 0xa5, 0xad, 0xb5, 0x7b, 0x42, 0xbf, 0x99, 0xe3, 0x1a, 0x2f, 0x49, 0x8d, - 0x93, 0x2b, 0x59, 0x01, 0xd4, 0xbe, 0x06, 0x6e, 0x83, 0xa1, 0x60, 0xe7, 0x1d, 0x5c, 0x8d, 0xa8, - 0xd9, 0x37, 0xd7, 0x37, 0x3f, 0xb2, 0xf8, 0x4c, 0x29, 0x3d, 0x3c, 0x65, 0x02, 0x3f, 0x31, 0xb9, - 0xd9, 0x12, 0xb2, 0x0e, 0x56, 0x93, 0x43, 0x2b, 0x8f, 0x4b, 0xb4, 0xa1, 0x4d, 0xa1, 0x05, 0x25, - 0xea, 0x8a, 0xbf, 0xce, 0x01, 0xa8, 0x6f, 0x9e, 0x86, 0x81, 0x4f, 0x71, 0x4f, 0x76, 0x4f, 0xc1, + 0x93, 0x2b, 0x59, 0x01, 0xd4, 0x3e, 0x06, 0x6e, 0x83, 0xa1, 0x60, 0xe7, 0x1d, 0x5c, 0x8d, 0xa8, + 0xd9, 0x37, 0xd7, 0x37, 0x3f, 0xb2, 0xf8, 0x4c, 0x29, 0x5d, 0x3c, 0x65, 0x02, 0x5f, 0x31, 0x39, + 0xd9, 0x12, 0xb2, 0x0e, 0x56, 0x93, 0x45, 0x2b, 0x8f, 0x4b, 0xb4, 0xa1, 0x4d, 0xa1, 0x05, 0x25, + 0xea, 0x8a, 0xbf, 0xca, 0x01, 0xa8, 0x4f, 0x9e, 0x86, 0x81, 0x4f, 0x71, 0x4f, 0x66, 0x4f, 0xc1, 0x44, 0x95, 0x6b, 0x8e, 0xb0, 0x2d, 0x71, 0xcd, 0xdc, 0x49, 0xac, 0x37, 0x25, 0xfe, 0xc4, 0x72, 0x46, 0x1d, 0x6a, 0x03, 0x80, 0x77, 0xc0, 0x20, 0xc1, 0x34, 0x76, 0x23, 0xb3, 0x6f, 0xce, 0x98, 0x1f, 0x59, 0xbc, 0xde, 0x15, 0x8a, 0x87, 0x36, 0x0b, 0xbe, 0x52, 0xfd, 0xb9, 0x52, 0x25, 0xb2, 0xa2, 0x98, 0x96, 0xcf, 0x4b, 0xa4, 0x41, 0xc4, 0x75, 0x20, 0xa9, 0xab, 0xf8, 0x5f, 0x03, 0x4c, 0xe8, 0x5e, 0xaa, 0x3b, 0xf8, 0x00, 0x12, 0x30, 0x44, 0x44, 0xb0, 0x70, 0x3f, 0x8d, 0x2c, 0xde, - 0x2a, 0x9d, 0xf4, 0x46, 0x95, 0xda, 0xe2, 0xaf, 0x3c, 0xc2, 0x8e, 0x4b, 0xfe, 0x83, 0x12, 0x20, - 0x58, 0x07, 0x79, 0x22, 0xcf, 0x88, 0x07, 0xd2, 0xc8, 0xe2, 0x7a, 0x6f, 0x40, 0x85, 0xce, 0xf2, - 0x68, 0xb3, 0x51, 0xc8, 0x27, 0xff, 0x21, 0x85, 0x55, 0xfc, 0x65, 0x0e, 0xcc, 0x2e, 0xc7, 0x34, - 0x0a, 0x3c, 0x84, 0x69, 0x10, 0x93, 0x2a, 0x5e, 0x0e, 0xdc, 0xd8, 0xf3, 0x57, 0xf0, 0xae, 0xe3, - 0x3b, 0x11, 0x8b, 0xd1, 0x39, 0xd0, 0xef, 0x5b, 0x1e, 0x96, 0x31, 0x33, 0x2a, 0x3d, 0xd9, 0x7f, - 0xdb, 0xf2, 0x30, 0xe2, 0x1c, 0x26, 0xc1, 0x42, 0x44, 0xde, 0x00, 0x25, 0x71, 0xe7, 0x28, 0xc4, - 0x88, 0x73, 0xe0, 0x35, 0x30, 0xb8, 0x1b, 0x10, 0xcf, 0x12, 0xa7, 0x37, 0x9c, 0x9e, 0xc7, 0x4d, - 0x4e, 0x45, 0x92, 0x0b, 0x5f, 0x00, 0x23, 0x36, 0xa6, 0x55, 0xe2, 0x84, 0x0c, 0xda, 0xec, 0xe7, - 0xc2, 0x17, 0xa4, 0xf0, 0xc8, 0x4a, 0xca, 0x42, 0xba, 0x1c, 0xbc, 0x0e, 0xf2, 0x21, 0x71, 0x02, - 0xe2, 0x44, 0x47, 0xe6, 0xc0, 0x9c, 0x31, 0x3f, 0x50, 0x9e, 0x90, 0x6b, 0xf2, 0x5b, 0x92, 0x8e, - 0x94, 0x04, 0x93, 0x7e, 0x87, 0x06, 0xfe, 0x96, 0x15, 0xed, 0x99, 0x83, 0x1c, 0x41, 0x49, 0xbf, - 0x5a, 0xd9, 0xbc, 0xcd, 0xe8, 0x48, 0x49, 0x14, 0xff, 0x6c, 0x00, 0x33, 0xeb, 0xa1, 0xc4, 0xbd, - 0xf0, 0x26, 0xc8, 0xd3, 0x88, 0xe5, 0x9c, 0xda, 0x91, 0xf4, 0xcf, 0x53, 0x89, 0xaa, 0x8a, 0xa4, - 0x1f, 0x37, 0x0a, 0xd3, 0xe9, 0x8a, 0x84, 0xca, 0x7d, 0xa3, 0xd6, 0xb2, 0x90, 0x3b, 0xc0, 0x3b, - 0x7b, 0x41, 0xb0, 0x2f, 0x4f, 0xff, 0x14, 0x21, 0xf7, 0x9a, 0x50, 0x94, 0x62, 0x8a, 0x90, 0x93, - 0x64, 0x94, 0x00, 0x15, 0xff, 0x93, 0xcb, 0x6e, 0x4c, 0x3b, 0xf4, 0xb7, 0x41, 0x9e, 0x5d, 0x21, - 0xdb, 0x8a, 0x2c, 0x79, 0x09, 0x9e, 0x7d, 0xb4, 0x0b, 0x27, 0xee, 0xeb, 0x06, 0x8e, 0xac, 0x32, 
- 0x94, 0xae, 0x00, 0x29, 0x0d, 0x29, 0xad, 0xf0, 0x10, 0xf4, 0xd3, 0x10, 0x57, 0xe5, 0x7e, 0xef, - 0x9d, 0x22, 0xda, 0xbb, 0xec, 0xa1, 0x12, 0xe2, 0x6a, 0x1a, 0x8c, 0xec, 0x3f, 0xc4, 0x11, 0xe1, - 0xbb, 0x06, 0x18, 0xa4, 0x3c, 0x2f, 0xc8, 0x5c, 0xb2, 0x7d, 0x06, 0xe0, 0x99, 0xbc, 0x23, 0xfe, - 0x47, 0x12, 0xb7, 0xf8, 0xcf, 0x1c, 0xb8, 0xda, 0x6d, 0xe9, 0x72, 0xe0, 0xdb, 0xe2, 0x10, 0xd6, - 0xe4, 0xbd, 0x12, 0x91, 0xf5, 0x82, 0x7e, 0xaf, 0x8e, 0x1b, 0x85, 0x27, 0x1e, 0xaa, 0x40, 0xbb, - 0x80, 0x5f, 0x55, 0x5b, 0x16, 0x97, 0xf4, 0x6a, 0xab, 0x61, 0xc7, 0x8d, 0xc2, 0xb8, 0x5a, 0xd6, - 0x6a, 0x2b, 0xac, 0x03, 0xe8, 0x5a, 0x34, 0xba, 0x43, 0x2c, 0x9f, 0x0a, 0xb5, 0x8e, 0x87, 0xa5, - 0xe7, 0x9e, 0x7a, 0xb4, 0xa0, 0x60, 0x2b, 0xca, 0x33, 0x12, 0x12, 0xae, 0xb7, 0x69, 0x43, 0x1d, - 0x10, 0x58, 0xce, 0x20, 0xd8, 0xa2, 0x2a, 0x0d, 0x68, 0x39, 0x9c, 0x51, 0x91, 0xe4, 0xc2, 0x27, - 0xc1, 0x90, 0x87, 0x29, 0xb5, 0x6a, 0x98, 0xdf, 0xfd, 0xe1, 0xb4, 0x28, 0x6e, 0x08, 0x32, 0x4a, - 0xf8, 0xc5, 0x7f, 0x19, 0xe0, 0x72, 0x37, 0xaf, 0xad, 0x3b, 0x34, 0x82, 0xdf, 0x6c, 0x0b, 0xfb, - 0xd2, 0xa3, 0xed, 0x90, 0xad, 0xe6, 0x41, 0xaf, 0x52, 0x49, 0x42, 0xd1, 0x42, 0xfe, 0x00, 0x0c, - 0x38, 0x11, 0xf6, 0x92, 0x6a, 0x89, 0x7a, 0x1f, 0x76, 0xe5, 0x31, 0x09, 0x3f, 0xb0, 0xc6, 0x80, - 0x90, 0xc0, 0x2b, 0x7e, 0x94, 0x03, 0x57, 0xba, 0x2d, 0x61, 0x79, 0x9c, 0x32, 0x67, 0x87, 0x6e, - 0x4c, 0x2c, 0x57, 0x06, 0x9b, 0x72, 0xf6, 0x16, 0xa7, 0x22, 0xc9, 0x65, 0xb9, 0x93, 0x3a, 0x7e, - 0x2d, 0x76, 0x2d, 0x22, 0x23, 0x49, 0x6d, 0xb8, 0x22, 0xe9, 0x48, 0x49, 0xc0, 0x12, 0x00, 0x74, - 0x2f, 0x20, 0x11, 0xc7, 0xe0, 0x1d, 0xce, 0x70, 0xf9, 0x3c, 0xcb, 0x08, 0x15, 0x45, 0x45, 0x9a, - 0x04, 0x2b, 0x24, 0xfb, 0x8e, 0x6f, 0xcb, 0x03, 0x57, 0x77, 0xf7, 0x96, 0xe3, 0xdb, 0x88, 0x73, - 0x18, 0xbe, 0xeb, 0xd0, 0x88, 0x51, 0xe4, 0x69, 0xb7, 0x38, 0x9c, 0x4b, 0x2a, 0x09, 0x86, 0x5f, - 0x65, 0x09, 0x36, 0x20, 0x0e, 0xa6, 0xe6, 0x60, 0x8a, 0xbf, 0xac, 0xa8, 0x48, 0x93, 0x28, 0xfe, - 0xa5, 0xbf, 0x7b, 0x7c, 0xb0, 0x04, 0x02, 0x1f, 0x07, 0x03, 0x35, 0x12, 0xc4, 0xa1, 0xf4, 0x92, - 0xf2, 0xf6, 0xcb, 0x8c, 0x88, 0x04, 0x0f, 0x7e, 0x07, 0x0c, 0xf8, 0x72, 0xc3, 0x2c, 0x82, 0x5e, - 0xeb, 0xfd, 0x31, 0x73, 0x6f, 0xa5, 0xe8, 0xc2, 0x91, 0x02, 0x14, 0x3e, 0x0f, 0x06, 0x68, 0x35, - 0x08, 0xb1, 0x74, 0xe2, 0x6c, 0x22, 0x54, 0x61, 0xc4, 0xe3, 0x46, 0x61, 0x2c, 0x51, 0xc7, 0x09, - 0x48, 0x08, 0xc3, 0x1f, 0x18, 0x20, 0x2f, 0xcb, 0x05, 0x35, 0x87, 0x78, 0x78, 0xbe, 0xde, 0x7b, - 0xbb, 0x65, 0xdb, 0x9b, 0x9e, 0x99, 0x24, 0x50, 0xa4, 0xc0, 0xe1, 0xf7, 0x0c, 0x00, 0xaa, 0xaa, - 0x76, 0x99, 0xc3, 0xdc, 0x87, 0x3d, 0xbb, 0x2a, 0x5a, 0x55, 0x14, 0x81, 0x90, 0xb6, 0x4a, 0x1a, - 0x2a, 0xac, 0x80, 0xa9, 0x90, 0x60, 0xae, 0xfb, 0xae, 0xbf, 0xef, 0x07, 0x07, 0xfe, 0x4d, 0x07, - 0xbb, 0x36, 0x35, 0xc1, 0x9c, 0x31, 0x9f, 0x2f, 0x5f, 0x91, 0xf6, 0x4f, 0x6d, 0x75, 0x12, 0x42, - 0x9d, 0xd7, 0x16, 0xdf, 0xeb, 0xcb, 0xf6, 0x5a, 0xd9, 0x7a, 0x01, 0x3f, 0x10, 0x9b, 0x17, 0x79, - 0x98, 0x9a, 0x06, 0x3f, 0x88, 0x37, 0x7b, 0x7f, 0x10, 0x2a, 0xd7, 0xa7, 0x45, 0x5a, 0x91, 0x28, - 0xd2, 0x4c, 0x80, 0x3f, 0x33, 0xc0, 0x98, 0x55, 0xad, 0xe2, 0x30, 0xc2, 0xb6, 0xb8, 0xc6, 0xb9, - 0xb3, 0x8d, 0xea, 0x29, 0x69, 0xd0, 0xd8, 0x92, 0x8e, 0x8a, 0x5a, 0x8d, 0x80, 0x2f, 0x81, 0xf3, - 0x34, 0x0a, 0x08, 0xb6, 0x93, 0x08, 0x92, 0xd9, 0x05, 0x36, 0x1b, 0x85, 0xf3, 0x95, 0x16, 0x0e, - 0xca, 0x48, 0x16, 0x3f, 0x19, 0x00, 0x85, 0x87, 0x44, 0xe8, 0x23, 0x34, 0xbd, 0xd7, 0xc0, 0x20, - 0xdf, 0xa9, 0xcd, 0x1d, 0x92, 0xd7, 0x4a, 0x3d, 0xa7, 0x22, 0xc9, 0x65, 0xe5, 0x89, 0xe1, 0xb3, - 0xf2, 0xd4, 0xc7, 0x05, 
0x55, 0x79, 0xaa, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x11, 0x00, 0x1b, 0x87, - 0x04, 0xb3, 0x8c, 0x64, 0x9b, 0x43, 0x5c, 0x5a, 0x9d, 0xcf, 0x8a, 0xe2, 0x20, 0x4d, 0x0a, 0xde, - 0x04, 0x30, 0xf9, 0xcf, 0x09, 0xfc, 0xd7, 0x2c, 0xe2, 0x3b, 0x7e, 0xcd, 0xcc, 0x73, 0xb3, 0xa7, - 0x59, 0xb5, 0x5d, 0x69, 0xe3, 0xa2, 0x0e, 0x2b, 0x60, 0x1d, 0x0c, 0x8a, 0x67, 0x34, 0xcf, 0x1b, - 0x3d, 0xbc, 0x71, 0xf7, 0x2c, 0xd7, 0xb1, 0x39, 0x54, 0x19, 0x70, 0xf7, 0x70, 0x14, 0x24, 0xd1, - 0xe0, 0xfb, 0x06, 0x18, 0xa5, 0xf1, 0x0e, 0x91, 0xd2, 0x94, 0x67, 0xf5, 0x91, 0xc5, 0x3b, 0xbd, - 0x82, 0xaf, 0x68, 0xba, 0xcb, 0x13, 0xcd, 0x46, 0x61, 0x54, 0xa7, 0xa0, 0x16, 0x6c, 0xf8, 0x07, - 0x03, 0x98, 0x96, 0x2d, 0x42, 0xdf, 0x72, 0xb7, 0x88, 0xe3, 0x47, 0x98, 0x88, 0x07, 0x91, 0x28, - 0x1f, 0x3d, 0xec, 0x15, 0xb3, 0xef, 0xac, 0xf2, 0x9c, 0x3c, 0x69, 0x73, 0xa9, 0x8b, 0x05, 0xa8, - 0xab, 0x6d, 0xc5, 0x7f, 0x1b, 0xd9, 0xd4, 0xa2, 0xed, 0xb2, 0x52, 0xb5, 0x5c, 0x0c, 0x57, 0xc0, - 0x04, 0xeb, 0x7e, 0x11, 0x0e, 0x5d, 0xa7, 0x6a, 0x51, 0xfe, 0xfa, 0x11, 0xd1, 0xad, 0x9e, 0xe1, - 0x95, 0x0c, 0x1f, 0xb5, 0xad, 0x80, 0xaf, 0x02, 0x28, 0xda, 0xc2, 0x16, 0x3d, 0xa2, 0x13, 0x50, - 0x0d, 0x5e, 0xa5, 0x4d, 0x02, 0x75, 0x58, 0x05, 0x97, 0xc1, 0xa4, 0x6b, 0xed, 0x60, 0xb7, 0x82, - 0x5d, 0x5c, 0x8d, 0x02, 0xc2, 0x55, 0x89, 0xf7, 0xe1, 0x54, 0xb3, 0x51, 0x98, 0x5c, 0xcf, 0x32, - 0x51, 0xbb, 0x7c, 0xf1, 0x6a, 0xf6, 0x2e, 0xeb, 0x1b, 0x17, 0xcd, 0xf6, 0x87, 0x39, 0x30, 0xd3, - 0x3d, 0x28, 0xe0, 0x77, 0x55, 0x6b, 0x2c, 0x3a, 0xbe, 0xd7, 0xcf, 0x20, 0xf4, 0xe4, 0x73, 0x00, - 0xb4, 0x3f, 0x05, 0xe0, 0x11, 0xab, 0xd7, 0x96, 0x9b, 0x3c, 0xfb, 0xb7, 0xcf, 0x02, 0x9d, 0xe9, - 0x2f, 0x0f, 0x8b, 0x2e, 0xc0, 0x72, 0x79, 0xd1, 0xb7, 0x5c, 0x5c, 0xfc, 0xa8, 0xed, 0x69, 0x9b, - 0x5e, 0x56, 0xf8, 0x43, 0x03, 0x8c, 0x07, 0x21, 0xf6, 0x97, 0xb6, 0xd6, 0xee, 0x7d, 0x59, 0x5c, - 0x5a, 0xe9, 0xa0, 0xb5, 0x93, 0x9b, 0xc8, 0xde, 0xd7, 0x42, 0xd7, 0x16, 0x09, 0x42, 0x5a, 0xbe, - 0xd0, 0x6c, 0x14, 0xc6, 0x37, 0x5b, 0x51, 0x50, 0x16, 0xb6, 0xe8, 0x81, 0xa9, 0xd5, 0xc3, 0x08, - 0x13, 0xdf, 0x72, 0x57, 0x82, 0x6a, 0xec, 0x61, 0x3f, 0x12, 0x36, 0x66, 0xc6, 0x05, 0xc6, 0x23, - 0x8e, 0x0b, 0xae, 0x80, 0xbe, 0x98, 0xb8, 0x32, 0x6a, 0x47, 0xd4, 0x10, 0x0c, 0xad, 0x23, 0x46, - 0x2f, 0x5e, 0x05, 0xfd, 0xcc, 0x4e, 0x78, 0x09, 0xf4, 0x11, 0xeb, 0x80, 0x6b, 0x1d, 0x2d, 0x0f, - 0x31, 0x11, 0x64, 0x1d, 0x20, 0x46, 0x2b, 0xfe, 0x7d, 0x0e, 0x8c, 0x67, 0xf6, 0x02, 0x67, 0x40, - 0x4e, 0x4d, 0xd6, 0x80, 0x54, 0x9a, 0x5b, 0x5b, 0x41, 0x39, 0xc7, 0x86, 0x2f, 0xaa, 0xec, 0x2a, - 0x40, 0x0b, 0xaa, 0x58, 0x70, 0x2a, 0x6b, 0xcb, 0x52, 0x75, 0xcc, 0x90, 0x24, 0x3d, 0x32, 0x1b, - 0xf0, 0xae, 0xbc, 0x15, 0xc2, 0x06, 0xbc, 0x8b, 0x18, 0xed, 0xa4, 0xb3, 0x92, 0x64, 0x58, 0x33, - 0xf0, 0x08, 0xc3, 0x9a, 0xc1, 0x07, 0x0e, 0x6b, 0x1e, 0x07, 0x03, 0x91, 0x13, 0xb9, 0x98, 0x57, - 0x2a, 0xad, 0x19, 0xbe, 0xc3, 0x88, 0x48, 0xf0, 0x20, 0x06, 0x43, 0x36, 0xde, 0xb5, 0x62, 0x37, - 0xe2, 0x45, 0x69, 0x64, 0xf1, 0xeb, 0xa7, 0x8b, 0x1e, 0x31, 0xcc, 0x58, 0x11, 0x2a, 0x51, 0xa2, - 0x1b, 0x3e, 0x01, 0x86, 0x3c, 0xeb, 0xd0, 0xf1, 0x62, 0x8f, 0x77, 0x8c, 0x86, 0x10, 0xdb, 0x10, - 0x24, 0x94, 0xf0, 0x58, 0x12, 0xc4, 0x87, 0x55, 0x37, 0xa6, 0x4e, 0x1d, 0x4b, 0xa6, 0x6c, 0xe9, - 0x54, 0x12, 0x5c, 0xcd, 0xf0, 0x51, 0xdb, 0x0a, 0x0e, 0xe6, 0xf8, 0x7c, 0xf1, 0x88, 0x06, 0x26, - 0x48, 0x28, 0xe1, 0xb5, 0x82, 0x49, 0xf9, 0xd1, 0x6e, 0x60, 0x72, 0x71, 0xdb, 0x0a, 0xf8, 0x34, - 0x18, 0xf6, 0xac, 0xc3, 0x75, 0xec, 0xd7, 0xa2, 0x3d, 0x73, 0x6c, 0xce, 0x98, 0xef, 0x2b, 0x8f, - 0x35, 0x1b, 0x85, 0xe1, 0x8d, 0x84, 0x88, 0x52, 
0x3e, 0x17, 0x76, 0x7c, 0x29, 0x7c, 0x5e, 0x13, - 0x4e, 0x88, 0x28, 0xe5, 0xb3, 0xce, 0x24, 0xb4, 0x22, 0x76, 0xaf, 0xcc, 0xf1, 0xd6, 0x87, 0xf3, - 0x96, 0x20, 0xa3, 0x84, 0x0f, 0xe7, 0x41, 0xde, 0xb3, 0x0e, 0xf9, 0x9b, 0xd2, 0x9c, 0xe0, 0x6a, - 0xf9, 0x40, 0x71, 0x43, 0xd2, 0x90, 0xe2, 0x72, 0x49, 0xc7, 0x17, 0x92, 0x93, 0x9a, 0xa4, 0xa4, - 0x21, 0xc5, 0x65, 0xf1, 0x1b, 0xfb, 0xce, 0xfd, 0x18, 0x0b, 0x61, 0xc8, 0x3d, 0xa3, 0xe2, 0xf7, - 0x6e, 0xca, 0x42, 0xba, 0x1c, 0x7b, 0xd3, 0x79, 0xb1, 0x1b, 0x39, 0xa1, 0x8b, 0x37, 0x77, 0xcd, - 0x0b, 0xdc, 0xff, 0xbc, 0x95, 0xdf, 0x50, 0x54, 0xa4, 0x49, 0xc0, 0xb7, 0x41, 0x3f, 0xf6, 0x63, - 0xcf, 0xbc, 0xc8, 0xcb, 0xf7, 0x69, 0xa3, 0x4f, 0xdd, 0x97, 0x55, 0x3f, 0xf6, 0x10, 0xd7, 0x0c, - 0x5f, 0x04, 0x63, 0x9e, 0x75, 0xc8, 0x92, 0x00, 0x26, 0x11, 0x7b, 0x68, 0x4e, 0xf1, 0x7d, 0x4f, - 0xb2, 0x26, 0x76, 0x43, 0x67, 0xa0, 0x56, 0x39, 0xbe, 0xd0, 0xf1, 0xb5, 0x85, 0xd3, 0xda, 0x42, - 0x9d, 0x81, 0x5a, 0xe5, 0x98, 0x93, 0x09, 0xbe, 0x1f, 0x3b, 0x04, 0xdb, 0xe6, 0xff, 0xf1, 0xbe, - 0x57, 0xce, 0x77, 0x05, 0x0d, 0x29, 0x2e, 0xbc, 0x9f, 0x8c, 0x1c, 0x4c, 0x7e, 0xf9, 0xb6, 0x7a, - 0x96, 0xba, 0x37, 0xc9, 0x12, 0x21, 0xd6, 0x91, 0xa8, 0x2a, 0xfa, 0xb0, 0x01, 0xfa, 0x60, 0xc0, - 0x72, 0xdd, 0xcd, 0x5d, 0xf3, 0x12, 0xf7, 0x78, 0x0f, 0xab, 0x85, 0xca, 0x30, 0x4b, 0x4c, 0x3f, - 0x12, 0x30, 0x0c, 0x2f, 0xf0, 0x59, 0x2c, 0xcc, 0x9c, 0x19, 0xde, 0x26, 0xd3, 0x8f, 0x04, 0x0c, - 0xdf, 0x9f, 0x7f, 0xb4, 0xb9, 0x6b, 0x3e, 0x76, 0x76, 0xfb, 0x63, 0xfa, 0x91, 0x80, 0x81, 0x36, - 0xe8, 0xf3, 0x83, 0xc8, 0xbc, 0xdc, 0xeb, 0xda, 0xcb, 0xab, 0xc9, 0xed, 0x20, 0x42, 0x4c, 0x3d, - 0xfc, 0xb1, 0x01, 0x40, 0x98, 0x46, 0xe2, 0x95, 0xd3, 0x8e, 0x00, 0x32, 0x68, 0xa5, 0x34, 0x7a, - 0x57, 0xfd, 0x88, 0x1c, 0xa5, 0xef, 0x1a, 0x2d, 0xca, 0x35, 0x03, 0xe0, 0x2f, 0x0c, 0x70, 0x51, - 0x6f, 0x77, 0x95, 0x65, 0xb3, 0xdc, 0x0f, 0x9b, 0x3d, 0x0c, 0xe4, 0x72, 0x10, 0xb8, 0x65, 0xb3, - 0xd9, 0x28, 0x5c, 0x5c, 0xea, 0x00, 0x88, 0x3a, 0x9a, 0x01, 0x7f, 0x63, 0x80, 0x49, 0x99, 0x1d, - 0x35, 0xe3, 0x0a, 0xdc, 0x6d, 0x6f, 0xf7, 0xd0, 0x6d, 0x59, 0x08, 0xe1, 0x3d, 0xf5, 0x95, 0xb1, - 0x8d, 0x8f, 0xda, 0xad, 0x82, 0xbf, 0x37, 0xc0, 0xa8, 0x8d, 0x43, 0xec, 0xdb, 0xd8, 0xaf, 0x32, - 0x33, 0xe7, 0x4e, 0x3b, 0x57, 0xc8, 0x9a, 0xb9, 0xa2, 0x69, 0x17, 0x16, 0x96, 0xa4, 0x85, 0xa3, - 0x3a, 0xeb, 0xb8, 0x51, 0x98, 0x4e, 0x97, 0xea, 0x1c, 0xd4, 0x62, 0x20, 0xfc, 0x89, 0x01, 0xc6, - 0x53, 0xb7, 0x8b, 0x02, 0x71, 0xf5, 0x6c, 0x0e, 0x9e, 0xb7, 0xa0, 0x4b, 0xad, 0x58, 0x28, 0x0b, - 0x0e, 0x7f, 0x6b, 0xb0, 0x6e, 0x2b, 0x79, 0xab, 0x51, 0xb3, 0xc8, 0x3d, 0xf8, 0x46, 0x2f, 0x3d, - 0xa8, 0x94, 0x0b, 0x07, 0x5e, 0x4f, 0x3b, 0x39, 0xc5, 0x39, 0x6e, 0x14, 0xa6, 0x74, 0xff, 0x29, - 0x06, 0xd2, 0x8d, 0x83, 0xef, 0x19, 0x60, 0x14, 0xa7, 0x0d, 0x33, 0x35, 0x1f, 0x3f, 0xad, 0xeb, - 0x3a, 0xb6, 0xdf, 0xe2, 0x39, 0xad, 0xb1, 0x28, 0x6a, 0x81, 0x65, 0xbd, 0x1f, 0x3e, 0xb4, 0xbc, - 0xd0, 0xc5, 0xe6, 0xff, 0xf7, 0xae, 0xf7, 0x5b, 0x15, 0x2a, 0x51, 0xa2, 0x1b, 0x5e, 0x07, 0x79, - 0x3f, 0x76, 0x5d, 0x6b, 0xc7, 0xc5, 0xe6, 0x13, 0xbc, 0x8b, 0x50, 0xf3, 0xc5, 0xdb, 0x92, 0x8e, - 0x94, 0x04, 0xdc, 0x05, 0x73, 0x87, 0xb7, 0xd4, 0x8f, 0x2f, 0x3a, 0x0e, 0xf0, 0xcc, 0x6b, 0x5c, - 0xcb, 0x4c, 0xb3, 0x51, 0x98, 0xde, 0xee, 0x3c, 0xe2, 0x7b, 0xa8, 0x0e, 0xf8, 0x26, 0x78, 0x4c, - 0x93, 0x59, 0xf5, 0x76, 0xb0, 0x6d, 0x63, 0x3b, 0x79, 0x68, 0x99, 0x5f, 0xe2, 0x10, 0xea, 0x1e, - 0x6f, 0x67, 0x05, 0xd0, 0x83, 0x56, 0xc3, 0x75, 0x30, 0xad, 0xb1, 0xd7, 0xfc, 0x68, 0x93, 0x54, - 0x22, 0xe2, 0xf8, 0x35, 0x73, 0x9e, 0xeb, 0xbd, 0x98, 0xdc, 0xbe, 0x6d, 
0x8d, 0x87, 0xba, 0xac, - 0x81, 0xaf, 0xb4, 0x68, 0xe3, 0x1f, 0x2e, 0xac, 0xf0, 0x16, 0x3e, 0xa2, 0xe6, 0x93, 0xbc, 0xb9, - 0xe0, 0xe7, 0xbc, 0xad, 0xd1, 0x51, 0x17, 0x79, 0xf8, 0x0d, 0x70, 0x21, 0xc3, 0x61, 0xef, 0x0a, - 0xf3, 0x29, 0xf1, 0x40, 0x60, 0x9d, 0xe8, 0x76, 0x42, 0x44, 0x9d, 0x24, 0xe1, 0xd7, 0x00, 0xd4, - 0xc8, 0x1b, 0x56, 0xc8, 0xd7, 0x3f, 0x2d, 0xde, 0x2a, 0xec, 0x44, 0xb7, 0x25, 0x0d, 0x75, 0x90, - 0x83, 0x1f, 0x1a, 0x2d, 0x3b, 0x49, 0x5f, 0xb3, 0xd4, 0xbc, 0xce, 0x2f, 0xec, 0x2b, 0x27, 0x0f, - 0xc0, 0x54, 0x19, 0x8a, 0x5d, 0xac, 0x79, 0x58, 0x43, 0x41, 0x5d, 0xd0, 0x67, 0xd8, 0x63, 0x3a, - 0x93, 0xc3, 0xe1, 0x04, 0xe8, 0xdb, 0xc7, 0xf2, 0xb3, 0x31, 0x62, 0x7f, 0xc2, 0xb7, 0xc0, 0x40, - 0xdd, 0x72, 0xe3, 0x64, 0x14, 0xd0, 0xbb, 0x5a, 0x8f, 0x84, 0xde, 0x97, 0x72, 0x37, 0x8c, 0x99, - 0x0f, 0x0c, 0x30, 0xdd, 0xb9, 0xaa, 0x7c, 0x51, 0x16, 0xfd, 0xdc, 0x00, 0x93, 0x6d, 0x05, 0xa4, - 0x83, 0x31, 0x6e, 0xab, 0x31, 0xf7, 0x7a, 0x58, 0x09, 0xc4, 0x45, 0xe0, 0x1d, 0xad, 0x6e, 0xd9, - 0x8f, 0x0c, 0x30, 0x91, 0x4d, 0xcc, 0x5f, 0x90, 0x97, 0x8a, 0xef, 0xe7, 0xc0, 0x74, 0xe7, 0x1e, - 0x1c, 0x7a, 0x6a, 0xba, 0xd0, 0xf3, 0x01, 0x4d, 0xa7, 0x91, 0xed, 0xbb, 0x06, 0x18, 0x79, 0x47, - 0xc9, 0x25, 0x5f, 0x33, 0x7b, 0x39, 0x15, 0x4a, 0x4a, 0x5f, 0xca, 0xa0, 0x48, 0x87, 0x2c, 0xfe, - 0xce, 0x00, 0x53, 0x1d, 0xcb, 0x39, 0xbc, 0x06, 0x06, 0x2d, 0xd7, 0x0d, 0x0e, 0xc4, 0x34, 0x4f, - 0x1b, 0xcb, 0x2f, 0x71, 0x2a, 0x92, 0x5c, 0xcd, 0x67, 0xb9, 0xcf, 0xc1, 0x67, 0xc5, 0x3f, 0x1a, - 0xe0, 0xf2, 0x83, 0xa2, 0xee, 0xf3, 0x3e, 0xc3, 0x79, 0x90, 0x97, 0xcd, 0xf6, 0x11, 0x3f, 0x3f, - 0x99, 0x5d, 0x65, 0x46, 0xe0, 0xbf, 0x96, 0x11, 0x7f, 0x15, 0x7f, 0x65, 0x80, 0x89, 0x0a, 0x26, - 0x75, 0xa7, 0x8a, 0x11, 0xde, 0xc5, 0x04, 0xfb, 0x55, 0x0c, 0x17, 0xc0, 0x30, 0xff, 0xda, 0x18, - 0x5a, 0xd5, 0xe4, 0x1b, 0xc9, 0xa4, 0x74, 0xf4, 0xf0, 0xed, 0x84, 0x81, 0x52, 0x19, 0xf5, 0x3d, - 0x25, 0xd7, 0xf5, 0x7b, 0xca, 0x65, 0xd0, 0x1f, 0xa6, 0x03, 0xe0, 0x3c, 0xe3, 0xf2, 0x99, 0x2f, - 0xa7, 0x72, 0x6e, 0x40, 0x22, 0x3e, 0xe5, 0x1a, 0x90, 0xdc, 0x80, 0x44, 0x88, 0x53, 0x8b, 0xdf, - 0x02, 0xe7, 0x5b, 0xd3, 0x33, 0xc3, 0x23, 0xb1, 0xdb, 0xf6, 0xfd, 0x86, 0xf1, 0x10, 0xe7, 0xe8, - 0x3f, 0x1b, 0xc8, 0x3d, 0xe4, 0x67, 0x03, 0x7f, 0x32, 0xc0, 0x85, 0xe4, 0x57, 0x35, 0xae, 0x83, - 0xfd, 0x68, 0x39, 0xf0, 0x77, 0x9d, 0x1a, 0xbc, 0x24, 0xe6, 0x88, 0xda, 0x70, 0x2e, 0x99, 0x21, - 0xc2, 0xfb, 0x60, 0x88, 0x0a, 0xa7, 0xc9, 0xf3, 0x7c, 0xf5, 0xe4, 0xe7, 0x99, 0xf5, 0xbe, 0x68, - 0x83, 0x12, 0x6a, 0x82, 0xc3, 0x8e, 0xb4, 0x6a, 0x95, 0x63, 0xdf, 0x96, 0xb3, 0xe4, 0x51, 0x71, - 0xa4, 0xcb, 0x4b, 0x82, 0x86, 0x14, 0xb7, 0xf8, 0x0f, 0x03, 0x4c, 0xb6, 0xfd, 0x4a, 0x08, 0x7e, - 0xdf, 0x00, 0xa3, 0x55, 0x6d, 0x7b, 0xf2, 0x62, 0x6c, 0x9c, 0xfe, 0x97, 0x48, 0x9a, 0x52, 0xd1, - 0x4b, 0xe8, 0x14, 0xd4, 0x02, 0x0a, 0xb7, 0x81, 0x59, 0xcd, 0xfc, 0x20, 0x2f, 0xf3, 0x89, 0xef, - 0x72, 0xb3, 0x51, 0x30, 0x97, 0xbb, 0xc8, 0xa0, 0xae, 0xab, 0xcb, 0xdf, 0xfe, 0xf8, 0xb3, 0xd9, - 0x73, 0x9f, 0x7c, 0x36, 0x7b, 0xee, 0xd3, 0xcf, 0x66, 0xcf, 0xbd, 0xdb, 0x9c, 0x35, 0x3e, 0x6e, - 0xce, 0x1a, 0x9f, 0x34, 0x67, 0x8d, 0x4f, 0x9b, 0xb3, 0xc6, 0x5f, 0x9b, 0xb3, 0xc6, 0x4f, 0xff, - 0x36, 0x7b, 0xee, 0x8d, 0x1b, 0x27, 0xfd, 0x19, 0xee, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc0, - 0x20, 0xb3, 0x2b, 0xda, 0x2b, 0x00, 0x00, + 0x2a, 0x9d, 0x74, 0x47, 0x95, 0xda, 0xe2, 0xaf, 0x3c, 0xc2, 0x96, 0x4b, 0xfe, 0x41, 0x09, 0x10, + 0xac, 0x83, 0x3c, 0x91, 0x6b, 0xc4, 0x03, 0x69, 0x64, 0x71, 0xbd, 0x37, 0xa0, 0x42, 0x67, 0x79, + 0xb4, 0xd9, 0x28, 0xe4, 0x93, 0x7f, 0x48, 0x61, 0x15, 
0x7f, 0x91, 0x03, 0xb3, 0xcb, 0x31, 0x8d, + 0x02, 0x0f, 0x61, 0x1a, 0xc4, 0xa4, 0x8a, 0x97, 0x03, 0x37, 0xf6, 0xfc, 0x15, 0xbc, 0xeb, 0xf8, + 0x4e, 0xc4, 0x62, 0x74, 0x0e, 0xf4, 0xfb, 0x96, 0x87, 0x65, 0xcc, 0x8c, 0x4a, 0x4f, 0xf6, 0xdf, + 0xb6, 0x3c, 0x8c, 0x38, 0x87, 0x49, 0xb0, 0x10, 0x91, 0x3b, 0x40, 0x49, 0xdc, 0x39, 0x0a, 0x31, + 0xe2, 0x1c, 0x78, 0x0d, 0x0c, 0xee, 0x06, 0xc4, 0xb3, 0xc4, 0xea, 0x0d, 0xa7, 0xeb, 0x71, 0x93, + 0x53, 0x91, 0xe4, 0xc2, 0x17, 0xc0, 0x88, 0x8d, 0x69, 0x95, 0x38, 0x21, 0x83, 0x36, 0xfb, 0xb9, + 0xf0, 0x05, 0x29, 0x3c, 0xb2, 0x92, 0xb2, 0x90, 0x2e, 0x07, 0xaf, 0x83, 0x7c, 0x48, 0x9c, 0x80, + 0x38, 0xd1, 0x91, 0x39, 0x30, 0x67, 0xcc, 0x0f, 0x94, 0x27, 0xe4, 0x98, 0xfc, 0x96, 0xa4, 0x23, + 0x25, 0xc1, 0xa4, 0xdf, 0xa1, 0x81, 0xbf, 0x65, 0x45, 0x7b, 0xe6, 0x20, 0x47, 0x50, 0xd2, 0xaf, + 0x56, 0x36, 0x6f, 0x33, 0x3a, 0x52, 0x12, 0xc5, 0x3f, 0x1b, 0xc0, 0xcc, 0x7a, 0x28, 0x71, 0x2f, + 0xbc, 0x09, 0xf2, 0x34, 0x62, 0x39, 0xa7, 0x76, 0x24, 0xfd, 0xf3, 0x54, 0xa2, 0xaa, 0x22, 0xe9, + 0xc7, 0x8d, 0xc2, 0x74, 0x3a, 0x22, 0xa1, 0x72, 0xdf, 0xa8, 0xb1, 0x2c, 0xe4, 0x0e, 0xf0, 0xce, + 0x5e, 0x10, 0xec, 0xcb, 0xd5, 0x3f, 0x45, 0xc8, 0xbd, 0x26, 0x14, 0xa5, 0x98, 0x22, 0xe4, 0x24, + 0x19, 0x25, 0x40, 0xc5, 0xff, 0xe4, 0xb2, 0x13, 0xd3, 0x16, 0xfd, 0x6d, 0x90, 0x67, 0x5b, 0xc8, + 0xb6, 0x22, 0x4b, 0x6e, 0x82, 0x67, 0x1f, 0x6d, 0xc3, 0x89, 0xfd, 0xba, 0x81, 0x23, 0xab, 0x0c, + 0xa5, 0x2b, 0x40, 0x4a, 0x43, 0x4a, 0x2b, 0x3c, 0x04, 0xfd, 0x34, 0xc4, 0x55, 0x39, 0xdf, 0x7b, + 0xa7, 0x88, 0xf6, 0x2e, 0x73, 0xa8, 0x84, 0xb8, 0x9a, 0x06, 0x23, 0xfb, 0x87, 0x38, 0x22, 0x7c, + 0xd7, 0x00, 0x83, 0x94, 0xe7, 0x05, 0x99, 0x4b, 0xb6, 0xcf, 0x00, 0x3c, 0x93, 0x77, 0xc4, 0x7f, + 0x24, 0x71, 0x8b, 0xff, 0xcc, 0x81, 0xab, 0xdd, 0x86, 0x2e, 0x07, 0xbe, 0x2d, 0x16, 0x61, 0x4d, + 0xee, 0x2b, 0x11, 0x59, 0x2f, 0xe8, 0xfb, 0xea, 0xb8, 0x51, 0x78, 0xe2, 0xa1, 0x0a, 0xb4, 0x0d, + 0xf8, 0x55, 0x35, 0x65, 0xb1, 0x49, 0xaf, 0xb6, 0x1a, 0x76, 0xdc, 0x28, 0x8c, 0xab, 0x61, 0xad, + 0xb6, 0xc2, 0x3a, 0x80, 0xae, 0x45, 0xa3, 0x3b, 0xc4, 0xf2, 0xa9, 0x50, 0xeb, 0x78, 0x58, 0x7a, + 0xee, 0xa9, 0x47, 0x0b, 0x0a, 0x36, 0xa2, 0x3c, 0x23, 0x21, 0xe1, 0x7a, 0x9b, 0x36, 0xd4, 0x01, + 0x81, 0xe5, 0x0c, 0x82, 0x2d, 0xaa, 0xd2, 0x80, 0x96, 0xc3, 0x19, 0x15, 0x49, 0x2e, 0x7c, 0x12, + 0x0c, 0x79, 0x98, 0x52, 0xab, 0x86, 0xf9, 0xde, 0x1f, 0x4e, 0x0f, 0xc5, 0x0d, 0x41, 0x46, 0x09, + 0xbf, 0xf8, 0x2f, 0x03, 0x5c, 0xee, 0xe6, 0xb5, 0x75, 0x87, 0x46, 0xf0, 0x9b, 0x6d, 0x61, 0x5f, + 0x7a, 0xb4, 0x19, 0xb2, 0xd1, 0x3c, 0xe8, 0x55, 0x2a, 0x49, 0x28, 0x5a, 0xc8, 0x1f, 0x80, 0x01, + 0x27, 0xc2, 0x5e, 0x72, 0x5a, 0xa2, 0xde, 0x87, 0x5d, 0x79, 0x4c, 0xc2, 0x0f, 0xac, 0x31, 0x20, + 0x24, 0xf0, 0x8a, 0x1f, 0xe5, 0xc0, 0x95, 0x6e, 0x43, 0x58, 0x1e, 0xa7, 0xcc, 0xd9, 0xa1, 0x1b, + 0x13, 0xcb, 0x95, 0xc1, 0xa6, 0x9c, 0xbd, 0xc5, 0xa9, 0x48, 0x72, 0x59, 0xee, 0xa4, 0x8e, 0x5f, + 0x8b, 0x5d, 0x8b, 0xc8, 0x48, 0x52, 0x13, 0xae, 0x48, 0x3a, 0x52, 0x12, 0xb0, 0x04, 0x00, 0xdd, + 0x0b, 0x48, 0xc4, 0x31, 0x78, 0x85, 0x33, 0x5c, 0x3e, 0xcf, 0x32, 0x42, 0x45, 0x51, 0x91, 0x26, + 0xc1, 0x0e, 0x92, 0x7d, 0xc7, 0xb7, 0xe5, 0x82, 0xab, 0xbd, 0x7b, 0xcb, 0xf1, 0x6d, 0xc4, 0x39, + 0x0c, 0xdf, 0x75, 0x68, 0xc4, 0x28, 0x72, 0xb5, 0x5b, 0x1c, 0xce, 0x25, 0x95, 0x04, 0xc3, 0xaf, + 0xb2, 0x04, 0x1b, 0x10, 0x07, 0x53, 0x73, 0x30, 0xc5, 0x5f, 0x56, 0x54, 0xa4, 0x49, 0x14, 0xff, + 0xd2, 0xdf, 0x3d, 0x3e, 0x58, 0x02, 0x81, 0x8f, 0x83, 0x81, 0x1a, 0x09, 0xe2, 0x50, 0x7a, 0x49, + 0x79, 0xfb, 0x65, 0x46, 0x44, 0x82, 0x07, 0xbf, 0x0d, 0x06, 0x7c, 0x39, 0x61, 
0x16, 0x41, 0xaf, + 0xf5, 0x7e, 0x99, 0xb9, 0xb7, 0x52, 0x74, 0xe1, 0x48, 0x01, 0x0a, 0x9f, 0x07, 0x03, 0xb4, 0x1a, + 0x84, 0x58, 0x3a, 0x71, 0x36, 0x11, 0xaa, 0x30, 0xe2, 0x71, 0xa3, 0x30, 0x96, 0xa8, 0xe3, 0x04, + 0x24, 0x84, 0xe1, 0xf7, 0x0d, 0x90, 0x97, 0xc7, 0x05, 0x35, 0x87, 0x78, 0x78, 0xbe, 0xde, 0x7b, + 0xbb, 0x65, 0xd9, 0x9b, 0xae, 0x99, 0x24, 0x50, 0xa4, 0xc0, 0xe1, 0x77, 0x0d, 0x00, 0xaa, 0xea, + 0xec, 0x32, 0x87, 0xb9, 0x0f, 0x7b, 0xb6, 0x55, 0xb4, 0x53, 0x51, 0x04, 0x42, 0x5a, 0x2a, 0x69, + 0xa8, 0xb0, 0x02, 0xa6, 0x42, 0x82, 0xb9, 0xee, 0xbb, 0xfe, 0xbe, 0x1f, 0x1c, 0xf8, 0x37, 0x1d, + 0xec, 0xda, 0xd4, 0x04, 0x73, 0xc6, 0x7c, 0xbe, 0x7c, 0x45, 0xda, 0x3f, 0xb5, 0xd5, 0x49, 0x08, + 0x75, 0x1e, 0x5b, 0x7c, 0xaf, 0x2f, 0x5b, 0x6b, 0x65, 0xcf, 0x0b, 0xf8, 0x81, 0x98, 0xbc, 0xc8, + 0xc3, 0xd4, 0x34, 0xf8, 0x42, 0xbc, 0xd9, 0xfb, 0x85, 0x50, 0xb9, 0x3e, 0x3d, 0xa4, 0x15, 0x89, + 0x22, 0xcd, 0x04, 0xf8, 0x53, 0x03, 0x8c, 0x59, 0xd5, 0x2a, 0x0e, 0x23, 0x6c, 0x8b, 0x6d, 0x9c, + 0x3b, 0xdb, 0xa8, 0x9e, 0x92, 0x06, 0x8d, 0x2d, 0xe9, 0xa8, 0xa8, 0xd5, 0x08, 0xf8, 0x12, 0x38, + 0x4f, 0xa3, 0x80, 0x60, 0x3b, 0x89, 0x20, 0x99, 0x5d, 0x60, 0xb3, 0x51, 0x38, 0x5f, 0x69, 0xe1, + 0xa0, 0x8c, 0x64, 0xf1, 0x93, 0x01, 0x50, 0x78, 0x48, 0x84, 0x3e, 0x42, 0xd1, 0x7b, 0x0d, 0x0c, + 0xf2, 0x99, 0xda, 0xdc, 0x21, 0x79, 0xed, 0xa8, 0xe7, 0x54, 0x24, 0xb9, 0xec, 0x78, 0x62, 0xf8, + 0xec, 0x78, 0xea, 0xe3, 0x82, 0xea, 0x78, 0xaa, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x11, 0x00, 0x1b, + 0x87, 0x04, 0xb3, 0x8c, 0x64, 0x9b, 0x43, 0x5c, 0x5a, 0xad, 0xcf, 0x8a, 0xe2, 0x20, 0x4d, 0x0a, + 0xde, 0x04, 0x30, 0xf9, 0xe7, 0x04, 0xfe, 0x6b, 0x16, 0xf1, 0x1d, 0xbf, 0x66, 0xe6, 0xb9, 0xd9, + 0xd3, 0xec, 0xb4, 0x5d, 0x69, 0xe3, 0xa2, 0x0e, 0x23, 0x60, 0x1d, 0x0c, 0x8a, 0x6b, 0x34, 0xcf, + 0x1b, 0x3d, 0xdc, 0x71, 0xf7, 0x2c, 0xd7, 0xb1, 0x39, 0x54, 0x19, 0x70, 0xf7, 0x70, 0x14, 0x24, + 0xd1, 0xe0, 0xfb, 0x06, 0x18, 0xa5, 0xf1, 0x0e, 0x91, 0xd2, 0x94, 0x67, 0xf5, 0x91, 0xc5, 0x3b, + 0xbd, 0x82, 0xaf, 0x68, 0xba, 0xcb, 0x13, 0xcd, 0x46, 0x61, 0x54, 0xa7, 0xa0, 0x16, 0x6c, 0xf8, + 0x07, 0x03, 0x98, 0x96, 0x2d, 0x42, 0xdf, 0x72, 0xb7, 0x88, 0xe3, 0x47, 0x98, 0x88, 0x0b, 0x91, + 0x38, 0x3e, 0x7a, 0x58, 0x2b, 0x66, 0xef, 0x59, 0xe5, 0x39, 0xb9, 0xd2, 0xe6, 0x52, 0x17, 0x0b, + 0x50, 0x57, 0xdb, 0x8a, 0xff, 0x36, 0xb2, 0xa9, 0x45, 0x9b, 0x65, 0xa5, 0x6a, 0xb9, 0x18, 0xae, + 0x80, 0x09, 0x56, 0xfd, 0x22, 0x1c, 0xba, 0x4e, 0xd5, 0xa2, 0xfc, 0xf6, 0x23, 0xa2, 0x5b, 0x5d, + 0xc3, 0x2b, 0x19, 0x3e, 0x6a, 0x1b, 0x01, 0x5f, 0x05, 0x50, 0x94, 0x85, 0x2d, 0x7a, 0x44, 0x25, + 0xa0, 0x0a, 0xbc, 0x4a, 0x9b, 0x04, 0xea, 0x30, 0x0a, 0x2e, 0x83, 0x49, 0xd7, 0xda, 0xc1, 0x6e, + 0x05, 0xbb, 0xb8, 0x1a, 0x05, 0x84, 0xab, 0x12, 0xf7, 0xc3, 0xa9, 0x66, 0xa3, 0x30, 0xb9, 0x9e, + 0x65, 0xa2, 0x76, 0xf9, 0xe2, 0xd5, 0xec, 0x5e, 0xd6, 0x27, 0x2e, 0x8a, 0xed, 0x0f, 0x73, 0x60, + 0xa6, 0x7b, 0x50, 0xc0, 0xef, 0xa8, 0xd2, 0x58, 0x54, 0x7c, 0xaf, 0x9f, 0x41, 0xe8, 0xc9, 0xeb, + 0x00, 0x68, 0xbf, 0x0a, 0xc0, 0x23, 0x76, 0x5e, 0x5b, 0x6e, 0x72, 0xed, 0xdf, 0x3e, 0x0b, 0x74, + 0xa6, 0xbf, 0x3c, 0x2c, 0xaa, 0x00, 0xcb, 0xe5, 0x87, 0xbe, 0xe5, 0xe2, 0xe2, 0x47, 0x6d, 0x57, + 0xdb, 0x74, 0xb3, 0xc2, 0x1f, 0x18, 0x60, 0x3c, 0x08, 0xb1, 0xbf, 0xb4, 0xb5, 0x76, 0xef, 0xcb, + 0x62, 0xd3, 0x4a, 0x07, 0xad, 0x9d, 0xdc, 0x44, 0x76, 0xbf, 0x16, 0xba, 0xb6, 0x48, 0x10, 0xd2, + 0xf2, 0x85, 0x66, 0xa3, 0x30, 0xbe, 0xd9, 0x8a, 0x82, 0xb2, 0xb0, 0x45, 0x0f, 0x4c, 0xad, 0x1e, + 0x46, 0x98, 0xf8, 0x96, 0xbb, 0x12, 0x54, 0x63, 0x0f, 0xfb, 0x91, 0xb0, 0x31, 0xd3, 0x2e, 0x30, + 0x1e, 
0xb1, 0x5d, 0x70, 0x05, 0xf4, 0xc5, 0xc4, 0x95, 0x51, 0x3b, 0xa2, 0x9a, 0x60, 0x68, 0x1d, + 0x31, 0x7a, 0xf1, 0x2a, 0xe8, 0x67, 0x76, 0xc2, 0x4b, 0xa0, 0x8f, 0x58, 0x07, 0x5c, 0xeb, 0x68, + 0x79, 0x88, 0x89, 0x20, 0xeb, 0x00, 0x31, 0x5a, 0xf1, 0xef, 0x73, 0x60, 0x3c, 0x33, 0x17, 0x38, + 0x03, 0x72, 0xaa, 0xb3, 0x06, 0xa4, 0xd2, 0xdc, 0xda, 0x0a, 0xca, 0x39, 0x36, 0x7c, 0x51, 0x65, + 0x57, 0x01, 0x5a, 0x50, 0x87, 0x05, 0xa7, 0xb2, 0xb2, 0x2c, 0x55, 0xc7, 0x0c, 0x49, 0xd2, 0x23, + 0xb3, 0x01, 0xef, 0xca, 0x5d, 0x21, 0x6c, 0xc0, 0xbb, 0x88, 0xd1, 0x4e, 0xda, 0x2b, 0x49, 0x9a, + 0x35, 0x03, 0x8f, 0xd0, 0xac, 0x19, 0x7c, 0x60, 0xb3, 0xe6, 0x71, 0x30, 0x10, 0x39, 0x91, 0x8b, + 0xf9, 0x49, 0xa5, 0x15, 0xc3, 0x77, 0x18, 0x11, 0x09, 0x1e, 0xc4, 0x60, 0xc8, 0xc6, 0xbb, 0x56, + 0xec, 0x46, 0xfc, 0x50, 0x1a, 0x59, 0xfc, 0xfa, 0xe9, 0xa2, 0x47, 0x34, 0x33, 0x56, 0x84, 0x4a, + 0x94, 0xe8, 0x86, 0x4f, 0x80, 0x21, 0xcf, 0x3a, 0x74, 0xbc, 0xd8, 0xe3, 0x15, 0xa3, 0x21, 0xc4, + 0x36, 0x04, 0x09, 0x25, 0x3c, 0x96, 0x04, 0xf1, 0x61, 0xd5, 0x8d, 0xa9, 0x53, 0xc7, 0x92, 0x29, + 0x4b, 0x3a, 0x95, 0x04, 0x57, 0x33, 0x7c, 0xd4, 0x36, 0x82, 0x83, 0x39, 0x3e, 0x1f, 0x3c, 0xa2, + 0x81, 0x09, 0x12, 0x4a, 0x78, 0xad, 0x60, 0x52, 0x7e, 0xb4, 0x1b, 0x98, 0x1c, 0xdc, 0x36, 0x02, + 0x3e, 0x0d, 0x86, 0x3d, 0xeb, 0x70, 0x1d, 0xfb, 0xb5, 0x68, 0xcf, 0x1c, 0x9b, 0x33, 0xe6, 0xfb, + 0xca, 0x63, 0xcd, 0x46, 0x61, 0x78, 0x23, 0x21, 0xa2, 0x94, 0xcf, 0x85, 0x1d, 0x5f, 0x0a, 0x9f, + 0xd7, 0x84, 0x13, 0x22, 0x4a, 0xf9, 0xac, 0x32, 0x09, 0xad, 0x88, 0xed, 0x2b, 0x73, 0xbc, 0xf5, + 0xe2, 0xbc, 0x25, 0xc8, 0x28, 0xe1, 0xc3, 0x79, 0x90, 0xf7, 0xac, 0x43, 0x7e, 0xa7, 0x34, 0x27, + 0xb8, 0x5a, 0xde, 0x50, 0xdc, 0x90, 0x34, 0xa4, 0xb8, 0x5c, 0xd2, 0xf1, 0x85, 0xe4, 0xa4, 0x26, + 0x29, 0x69, 0x48, 0x71, 0x59, 0xfc, 0xc6, 0xbe, 0x73, 0x3f, 0xc6, 0x42, 0x18, 0x72, 0xcf, 0xa8, + 0xf8, 0xbd, 0x9b, 0xb2, 0x90, 0x2e, 0xc7, 0xee, 0x74, 0x5e, 0xec, 0x46, 0x4e, 0xe8, 0xe2, 0xcd, + 0x5d, 0xf3, 0x02, 0xf7, 0x3f, 0x2f, 0xe5, 0x37, 0x14, 0x15, 0x69, 0x12, 0xf0, 0x6d, 0xd0, 0x8f, + 0xfd, 0xd8, 0x33, 0x2f, 0xf2, 0xe3, 0xfb, 0xb4, 0xd1, 0xa7, 0xf6, 0xcb, 0xaa, 0x1f, 0x7b, 0x88, + 0x6b, 0x86, 0x2f, 0x82, 0x31, 0xcf, 0x3a, 0x64, 0x49, 0x00, 0x93, 0x88, 0x5d, 0x34, 0xa7, 0xf8, + 0xbc, 0x27, 0x59, 0x11, 0xbb, 0xa1, 0x33, 0x50, 0xab, 0x1c, 0x1f, 0xe8, 0xf8, 0xda, 0xc0, 0x69, + 0x6d, 0xa0, 0xce, 0x40, 0xad, 0x72, 0xcc, 0xc9, 0x04, 0xdf, 0x8f, 0x1d, 0x82, 0x6d, 0xf3, 0xff, + 0x78, 0xdd, 0x2b, 0xfb, 0xbb, 0x82, 0x86, 0x14, 0x17, 0xde, 0x4f, 0x5a, 0x0e, 0x26, 0xdf, 0x7c, + 0x5b, 0x3d, 0x4b, 0xdd, 0x9b, 0x64, 0x89, 0x10, 0xeb, 0x48, 0x9c, 0x2a, 0x7a, 0xb3, 0x01, 0xfa, + 0x60, 0xc0, 0x72, 0xdd, 0xcd, 0x5d, 0xf3, 0x12, 0xf7, 0x78, 0x0f, 0x4f, 0x0b, 0x95, 0x61, 0x96, + 0x98, 0x7e, 0x24, 0x60, 0x18, 0x5e, 0xe0, 0xb3, 0x58, 0x98, 0x39, 0x33, 0xbc, 0x4d, 0xa6, 0x1f, + 0x09, 0x18, 0x3e, 0x3f, 0xff, 0x68, 0x73, 0xd7, 0x7c, 0xec, 0xec, 0xe6, 0xc7, 0xf4, 0x23, 0x01, + 0x03, 0x6d, 0xd0, 0xe7, 0x07, 0x91, 0x79, 0xb9, 0xd7, 0x67, 0x2f, 0x3f, 0x4d, 0x6e, 0x07, 0x11, + 0x62, 0xea, 0xe1, 0x8f, 0x0c, 0x00, 0xc2, 0x34, 0x12, 0xaf, 0x9c, 0xb6, 0x05, 0x90, 0x41, 0x2b, + 0xa5, 0xd1, 0xbb, 0xea, 0x47, 0xe4, 0x28, 0xbd, 0xd7, 0x68, 0x51, 0xae, 0x19, 0x00, 0x7f, 0x6e, + 0x80, 0x8b, 0x7a, 0xb9, 0xab, 0x2c, 0x9b, 0xe5, 0x7e, 0xd8, 0xec, 0x61, 0x20, 0x97, 0x83, 0xc0, + 0x2d, 0x9b, 0xcd, 0x46, 0xe1, 0xe2, 0x52, 0x07, 0x40, 0xd4, 0xd1, 0x0c, 0xf8, 0x1b, 0x03, 0x4c, + 0xca, 0xec, 0xa8, 0x19, 0x57, 0xe0, 0x6e, 0x7b, 0xbb, 0x87, 0x6e, 0xcb, 0x42, 0x08, 0xef, 0xa9, + 0x57, 0xc6, 0x36, 0x3e, 0x6a, 
0xb7, 0x0a, 0xfe, 0xde, 0x00, 0xa3, 0x36, 0x0e, 0xb1, 0x6f, 0x63, + 0xbf, 0xca, 0xcc, 0x9c, 0x3b, 0x6d, 0x5f, 0x21, 0x6b, 0xe6, 0x8a, 0xa6, 0x5d, 0x58, 0x58, 0x92, + 0x16, 0x8e, 0xea, 0xac, 0xe3, 0x46, 0x61, 0x3a, 0x1d, 0xaa, 0x73, 0x50, 0x8b, 0x81, 0xf0, 0xc7, + 0x06, 0x18, 0x4f, 0xdd, 0x2e, 0x0e, 0x88, 0xab, 0x67, 0xb3, 0xf0, 0xbc, 0x04, 0x5d, 0x6a, 0xc5, + 0x42, 0x59, 0x70, 0xf8, 0x5b, 0x83, 0x55, 0x5b, 0xc9, 0x5d, 0x8d, 0x9a, 0x45, 0xee, 0xc1, 0x37, + 0x7a, 0xe9, 0x41, 0xa5, 0x5c, 0x38, 0xf0, 0x7a, 0x5a, 0xc9, 0x29, 0xce, 0x71, 0xa3, 0x30, 0xa5, + 0xfb, 0x4f, 0x31, 0x90, 0x6e, 0x1c, 0x7c, 0xcf, 0x00, 0xa3, 0x38, 0x2d, 0x98, 0xa9, 0xf9, 0xf8, + 0x69, 0x5d, 0xd7, 0xb1, 0xfc, 0x16, 0xd7, 0x69, 0x8d, 0x45, 0x51, 0x0b, 0x2c, 0xab, 0xfd, 0xf0, + 0xa1, 0xe5, 0x85, 0x2e, 0x36, 0xff, 0xbf, 0x77, 0xb5, 0xdf, 0xaa, 0x50, 0x89, 0x12, 0xdd, 0xf0, + 0x3a, 0xc8, 0xfb, 0xb1, 0xeb, 0x5a, 0x3b, 0x2e, 0x36, 0x9f, 0xe0, 0x55, 0x84, 0xea, 0x2f, 0xde, + 0x96, 0x74, 0xa4, 0x24, 0xe0, 0x2e, 0x98, 0x3b, 0xbc, 0xa5, 0x3e, 0xbe, 0xe8, 0xd8, 0xc0, 0x33, + 0xaf, 0x71, 0x2d, 0x33, 0xcd, 0x46, 0x61, 0x7a, 0xbb, 0x73, 0x8b, 0xef, 0xa1, 0x3a, 0xe0, 0x9b, + 0xe0, 0x31, 0x4d, 0x66, 0xd5, 0xdb, 0xc1, 0xb6, 0x8d, 0xed, 0xe4, 0xa2, 0x65, 0x7e, 0x89, 0x43, + 0xa8, 0x7d, 0xbc, 0x9d, 0x15, 0x40, 0x0f, 0x1a, 0x0d, 0xd7, 0xc1, 0xb4, 0xc6, 0x5e, 0xf3, 0xa3, + 0x4d, 0x52, 0x89, 0x88, 0xe3, 0xd7, 0xcc, 0x79, 0xae, 0xf7, 0x62, 0xb2, 0xfb, 0xb6, 0x35, 0x1e, + 0xea, 0x32, 0x06, 0xbe, 0xd2, 0xa2, 0x8d, 0x3f, 0x5c, 0x58, 0xe1, 0x2d, 0x7c, 0x44, 0xcd, 0x27, + 0x79, 0x71, 0xc1, 0xd7, 0x79, 0x5b, 0xa3, 0xa3, 0x2e, 0xf2, 0xf0, 0x1b, 0xe0, 0x42, 0x86, 0xc3, + 0xee, 0x15, 0xe6, 0x53, 0xe2, 0x82, 0xc0, 0x2a, 0xd1, 0xed, 0x84, 0x88, 0x3a, 0x49, 0xc2, 0xaf, + 0x01, 0xa8, 0x91, 0x37, 0xac, 0x90, 0x8f, 0x7f, 0x5a, 0xdc, 0x55, 0xd8, 0x8a, 0x6e, 0x4b, 0x1a, + 0xea, 0x20, 0x07, 0x3f, 0x34, 0x5a, 0x66, 0x92, 0xde, 0x66, 0xa9, 0x79, 0x9d, 0x6f, 0xd8, 0x57, + 0x4e, 0x1e, 0x80, 0xa9, 0x32, 0x14, 0xbb, 0x58, 0xf3, 0xb0, 0x86, 0x82, 0xba, 0xa0, 0xcf, 0xb0, + 0xcb, 0x74, 0x26, 0x87, 0xc3, 0x09, 0xd0, 0xb7, 0x8f, 0xe5, 0xb3, 0x31, 0x62, 0x3f, 0xe1, 0x5b, + 0x60, 0xa0, 0x6e, 0xb9, 0x71, 0xd2, 0x0a, 0xe8, 0xdd, 0x59, 0x8f, 0x84, 0xde, 0x97, 0x72, 0x37, + 0x8c, 0x99, 0x0f, 0x0c, 0x30, 0xdd, 0xf9, 0x54, 0xf9, 0xa2, 0x2c, 0xfa, 0x99, 0x01, 0x26, 0xdb, + 0x0e, 0x90, 0x0e, 0xc6, 0xb8, 0xad, 0xc6, 0xdc, 0xeb, 0xe1, 0x49, 0x20, 0x36, 0x02, 0xaf, 0x68, + 0x75, 0xcb, 0x7e, 0x68, 0x80, 0x89, 0x6c, 0x62, 0xfe, 0x82, 0xbc, 0x54, 0x7c, 0x3f, 0x07, 0xa6, + 0x3b, 0xd7, 0xe0, 0xd0, 0x53, 0xdd, 0x85, 0x9e, 0x37, 0x68, 0x3a, 0xb5, 0x6c, 0xdf, 0x35, 0xc0, + 0xc8, 0x3b, 0x4a, 0x2e, 0x79, 0xcd, 0xec, 0x65, 0x57, 0x28, 0x39, 0xfa, 0x52, 0x06, 0x45, 0x3a, + 0x64, 0xf1, 0x77, 0x06, 0x98, 0xea, 0x78, 0x9c, 0xc3, 0x6b, 0x60, 0xd0, 0x72, 0xdd, 0xe0, 0x40, + 0x74, 0xf3, 0xb4, 0xb6, 0xfc, 0x12, 0xa7, 0x22, 0xc9, 0xd5, 0x7c, 0x96, 0xfb, 0x1c, 0x7c, 0x56, + 0xfc, 0xa3, 0x01, 0x2e, 0x3f, 0x28, 0xea, 0x3e, 0xef, 0x35, 0x9c, 0x07, 0x79, 0x59, 0x6c, 0x1f, + 0xf1, 0xf5, 0x93, 0xd9, 0x55, 0x66, 0x04, 0xfe, 0xb5, 0x8c, 0xf8, 0x55, 0xfc, 0xa5, 0x01, 0x26, + 0x2a, 0x98, 0xd4, 0x9d, 0x2a, 0x46, 0x78, 0x17, 0x13, 0xec, 0x57, 0x31, 0x5c, 0x00, 0xc3, 0xfc, + 0xb5, 0x31, 0xb4, 0xaa, 0xc9, 0x1b, 0xc9, 0xa4, 0x74, 0xf4, 0xf0, 0xed, 0x84, 0x81, 0x52, 0x19, + 0xf5, 0x9e, 0x92, 0xeb, 0xfa, 0x9e, 0x72, 0x19, 0xf4, 0x87, 0x69, 0x03, 0x38, 0xcf, 0xb8, 0xbc, + 0xe7, 0xcb, 0xa9, 0x9c, 0x1b, 0x90, 0x88, 0x77, 0xb9, 0x06, 0x24, 0x37, 0x20, 0x11, 0xe2, 0xd4, + 0xe2, 0xaf, 0x0d, 0x70, 0xbe, 0x35, 0x3f, 0x33, 0x40, 
0x12, 0xbb, 0x6d, 0x0f, 0x38, 0x8c, 0x87, + 0x38, 0x47, 0xff, 0x6e, 0x20, 0xf7, 0xe0, 0xef, 0x06, 0xe0, 0xcb, 0x60, 0x52, 0xfe, 0x5c, 0x3d, + 0x0c, 0x09, 0xa6, 0xfc, 0x65, 0xb2, 0xaf, 0xf5, 0x7b, 0xbf, 0x8d, 0xac, 0x00, 0x6a, 0x1f, 0x53, + 0xfc, 0x93, 0x01, 0x2e, 0x24, 0xdf, 0xe7, 0xb8, 0x0e, 0xf6, 0xa3, 0xe5, 0xc0, 0xdf, 0x75, 0x6a, + 0xf0, 0x92, 0xe8, 0x48, 0x6a, 0x6d, 0xbe, 0xa4, 0x1b, 0x09, 0xef, 0x83, 0x21, 0x2a, 0xdc, 0x2f, + 0x23, 0xe3, 0xd5, 0x93, 0x47, 0x46, 0x76, 0x1d, 0x45, 0x41, 0x95, 0x50, 0x13, 0x1c, 0x16, 0x1c, + 0x55, 0xab, 0x1c, 0xfb, 0xb6, 0xec, 0x4a, 0x8f, 0x8a, 0xe0, 0x58, 0x5e, 0x12, 0x34, 0xa4, 0xb8, + 0xc5, 0x7f, 0x18, 0x60, 0xb2, 0xed, 0x7b, 0x23, 0xf8, 0x3d, 0x03, 0x8c, 0x56, 0xb5, 0xe9, 0xc9, + 0x2d, 0xb6, 0x71, 0xfa, 0x6f, 0x9a, 0x34, 0xa5, 0xa2, 0x2a, 0xd1, 0x29, 0xa8, 0x05, 0x14, 0x6e, + 0x03, 0xb3, 0x9a, 0xf9, 0xb4, 0x2f, 0xf3, 0x58, 0x78, 0xb9, 0xd9, 0x28, 0x98, 0xcb, 0x5d, 0x64, + 0x50, 0xd7, 0xd1, 0xe5, 0x6f, 0x7d, 0xfc, 0xd9, 0xec, 0xb9, 0x4f, 0x3e, 0x9b, 0x3d, 0xf7, 0xe9, + 0x67, 0xb3, 0xe7, 0xde, 0x6d, 0xce, 0x1a, 0x1f, 0x37, 0x67, 0x8d, 0x4f, 0x9a, 0xb3, 0xc6, 0xa7, + 0xcd, 0x59, 0xe3, 0xaf, 0xcd, 0x59, 0xe3, 0x27, 0x7f, 0x9b, 0x3d, 0xf7, 0xc6, 0x8d, 0x93, 0x7e, + 0xd0, 0xfb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x41, 0x66, 0xfd, 0x82, 0x24, 0x2c, 0x00, 0x00, } func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { @@ -2629,6 +2630,11 @@ func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x1a i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -3338,6 +3344,8 @@ func (m *ValidationRule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3813,6 +3821,7 @@ func (this *ValidationRule) String() string { s := strings.Join([]string{`&ValidationRule{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, `}`, }, "") return s @@ -8879,6 +8888,38 @@ func (m *ValidationRule) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto index d0b190fd56..4632a83e59 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +++ 
b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -107,12 +107,12 @@ message CustomResourceColumnDefinition { // CustomResourceConversion describes how to convert different versions of a CR. message CustomResourceConversion { // strategy specifies how custom resources are converted between versions. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information + // - `"None"`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `"Webhook"`: API Server will call to an external webhook to do the conversion. Additional information // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. optional string strategy = 1; - // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. + // webhook describes how to call the conversion webhook. Required when `strategy` is set to `"Webhook"`. // +optional optional WebhookConversion webhook = 2; } @@ -665,6 +665,19 @@ message ValidationRule { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" optional string message = 2; + + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + optional string messageExpression = 3; } // WebhookClientConfig contains the information to make a TLS connection with the webhook. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go index 285058d77a..59ec0e372b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go @@ -74,12 +74,12 @@ type CustomResourceDefinitionSpec struct { // CustomResourceConversion describes how to convert different versions of a CR. type CustomResourceConversion struct { // strategy specifies how custom resources are converted between versions. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - // - `Webhook`: API Server will call to an external webhook to do the conversion. 
Additional information + // - `"None"`: The converter only change the apiVersion and would not touch any other field in the custom resource. + // - `"Webhook"`: API Server will call to an external webhook to do the conversion. Additional information // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` - // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. + // webhook describes how to call the conversion webhook. Required when `strategy` is set to `"Webhook"`. // +optional Webhook *WebhookConversion `json:"webhook,omitempty" protobuf:"bytes,2,opt,name=webhook"` } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go index 277fd7a124..b348d0d19b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -235,12 +235,24 @@ type ValidationRule struct { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,3,opt,name=messageExpression"` } // JSON represents any valid JSON value. // These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. 
type JSON struct { - Raw []byte `protobuf:"bytes,1,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go index 95a58529b1..cde5275cef 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go @@ -1258,6 +1258,7 @@ func Convert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextens func autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { out.Rule = in.Rule out.Message = in.Message + out.MessageExpression = in.MessageExpression return nil } @@ -1269,6 +1270,7 @@ func Convert_v1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRul func autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { out.Rule = in.Rule out.Message = in.Message + out.MessageExpression = in.MessageExpression return nil } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index 6ab5006676..011bd72dc7 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -785,200 +785,202 @@ func init() { } var fileDescriptor_98a4cc6918394e53 = []byte{ - // 3079 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcf, 0x73, 0x23, 0x47, - 0xf5, 0xdf, 0x91, 0x2d, 0x5b, 0x6e, 0xdb, 0x6b, 0xbb, 0x77, 0xed, 0xcc, 0x3a, 0x1b, 0xcb, 0x56, - 0xbe, 0xd9, 0xaf, 0x93, 0x6c, 0xe4, 0xc4, 0x24, 0x24, 0xa4, 0x48, 0x51, 0x96, 0xed, 0x0d, 0x4e, - 0xd6, 0x3f, 0x68, 0xed, 0x26, 0x86, 0xfc, 0x6c, 0x6b, 0xda, 0xf2, 0xc4, 0xa3, 0x99, 0xd9, 0xe9, - 0x19, 0xd9, 0xae, 0x00, 0xc5, 0x8f, 0x4a, 0x41, 0x51, 0x40, 0x28, 0x92, 0x0b, 0x05, 0x1c, 0x02, - 0xc5, 0x85, 0x03, 0x1c, 0xe0, 0x06, 0x7f, 0x40, 0x8e, 0x29, 0x8a, 0x43, 0x0e, 0x94, 0x20, 0xe2, - 0xca, 0x91, 0x2a, 0xaa, 0x7c, 0xa2, 0xfa, 0xc7, 0xf4, 0xb4, 0x46, 0xd2, 0xee, 0x56, 0x2c, 0x65, - 0xb9, 0x69, 0xde, 0xaf, 0xcf, 0x9b, 0xd7, 0xaf, 0x5f, 0xbf, 0x7e, 0x23, 0xb0, 0x7f, 0xf8, 0x0c, - 0x2d, 0xda, 0xde, 0xd2, 0x61, 0xb4, 0x47, 0x02, 0x97, 0x84, 0x84, 0x2e, 0xd5, 0x89, 0x6b, 0x79, - 0xc1, 0x92, 0x64, 0x60, 0xdf, 0x26, 0xc7, 0x21, 0x71, 0xa9, 0xed, 0xb9, 0xf4, 0x31, 0xec, 0xdb, - 0x94, 0x04, 0x75, 0x12, 0x2c, 0xf9, 0x87, 0x55, 0xc6, 0xa3, 0xad, 0x02, 0x4b, 0xf5, 0x27, 0xf6, - 0x48, 0x88, 0x9f, 0x58, 0xaa, 0x12, 0x97, 0x04, 0x38, 0x24, 0x56, 0xd1, 0x0f, 0xbc, 0xd0, 0x83, - 0xcf, 0x09, 0x73, 0xc5, 0x16, 0xe9, 0x37, 0x94, 0xb9, 0xa2, 0x7f, 0x58, 0x65, 0x3c, 0xda, 0x2a, - 0x50, 0x94, 0xe6, 0x66, 0x1f, 0xab, 0xda, 0xe1, 0x41, 0xb4, 0x57, 0xac, 0x78, 0xb5, 0xa5, 0xaa, - 0x57, 0xf5, 0x96, 0xb8, 0xd5, 0xbd, 0x68, 0x9f, 0x3f, 0xf1, 0x07, 0xfe, 0x4b, 0xa0, 0xcd, 0x3e, - 0x99, 0x38, 0x5f, 0xc3, 0x95, 0x03, 0xdb, 0x25, 0xc1, 0x49, 0xe2, 0x71, 0x8d, 0x84, 0x78, 0xa9, - 0xde, 0xe6, 0xe3, 0xec, 0x52, 0x37, 0xad, 0x20, 0x72, 0x43, 0xbb, 0x46, 0xda, 0x14, 0x3e, 0x7f, - 0x27, 
0x05, 0x5a, 0x39, 0x20, 0x35, 0x9c, 0xd6, 0x2b, 0x9c, 0x1a, 0x60, 0x6a, 0xd5, 0x73, 0xeb, - 0x24, 0x60, 0x6f, 0x89, 0xc8, 0xad, 0x88, 0xd0, 0x10, 0x96, 0xc0, 0x40, 0x64, 0x5b, 0xa6, 0x31, - 0x6f, 0x2c, 0x8e, 0x94, 0x1e, 0xff, 0xb0, 0x91, 0x3f, 0xd7, 0x6c, 0xe4, 0x07, 0x6e, 0x6e, 0xac, - 0x9d, 0x36, 0xf2, 0x0b, 0xdd, 0x90, 0xc2, 0x13, 0x9f, 0xd0, 0xe2, 0xcd, 0x8d, 0x35, 0xc4, 0x94, - 0xe1, 0xf3, 0x60, 0xca, 0x22, 0xd4, 0x0e, 0x88, 0xb5, 0xb2, 0xb3, 0xf1, 0x92, 0xb0, 0x6f, 0x66, - 0xb8, 0xc5, 0x4b, 0xd2, 0xe2, 0xd4, 0x5a, 0x5a, 0x00, 0xb5, 0xeb, 0xc0, 0x5d, 0x30, 0xec, 0xed, - 0xbd, 0x45, 0x2a, 0x21, 0x35, 0x07, 0xe6, 0x07, 0x16, 0x47, 0x97, 0x1f, 0x2b, 0x26, 0x2b, 0xa8, - 0x5c, 0xe0, 0xcb, 0x26, 0x5f, 0xb6, 0x88, 0xf0, 0xd1, 0x7a, 0xbc, 0x72, 0xa5, 0x09, 0x89, 0x36, - 0xbc, 0x2d, 0xac, 0xa0, 0xd8, 0x5c, 0xe1, 0xd7, 0x19, 0x00, 0xf5, 0x97, 0xa7, 0xbe, 0xe7, 0x52, - 0xd2, 0x93, 0xb7, 0xa7, 0x60, 0xb2, 0xc2, 0x2d, 0x87, 0xc4, 0x92, 0xb8, 0x66, 0xe6, 0xd3, 0x78, - 0x6f, 0x4a, 0xfc, 0xc9, 0xd5, 0x94, 0x39, 0xd4, 0x06, 0x00, 0x6f, 0x80, 0xa1, 0x80, 0xd0, 0xc8, - 0x09, 0xcd, 0x81, 0x79, 0x63, 0x71, 0x74, 0xf9, 0x6a, 0x57, 0x28, 0x9e, 0xdf, 0x2c, 0xf9, 0x8a, - 0xf5, 0x27, 0x8a, 0xe5, 0x10, 0x87, 0x11, 0x2d, 0x9d, 0x97, 0x48, 0x43, 0x88, 0xdb, 0x40, 0xd2, - 0x56, 0xe1, 0xfb, 0x19, 0x30, 0xa9, 0x47, 0xa9, 0x6e, 0x93, 0x23, 0x78, 0x04, 0x86, 0x03, 0x91, - 0x2c, 0x3c, 0x4e, 0xa3, 0xcb, 0x3b, 0xc5, 0x33, 0x6d, 0xab, 0x62, 0x5b, 0x12, 0x96, 0x46, 0xd9, - 0x9a, 0xc9, 0x07, 0x14, 0xa3, 0xc1, 0xb7, 0x41, 0x2e, 0x90, 0x0b, 0xc5, 0xb3, 0x69, 0x74, 0xf9, - 0x2b, 0x3d, 0x44, 0x16, 0x86, 0x4b, 0x63, 0xcd, 0x46, 0x3e, 0x17, 0x3f, 0x21, 0x05, 0x58, 0x78, - 0x2f, 0x03, 0xe6, 0x56, 0x23, 0x1a, 0x7a, 0x35, 0x44, 0xa8, 0x17, 0x05, 0x15, 0xb2, 0xea, 0x39, - 0x51, 0xcd, 0x5d, 0x23, 0xfb, 0xb6, 0x6b, 0x87, 0x2c, 0x5b, 0xe7, 0xc1, 0xa0, 0x8b, 0x6b, 0x44, - 0x66, 0xcf, 0x98, 0x8c, 0xe9, 0xe0, 0x16, 0xae, 0x11, 0xc4, 0x39, 0x4c, 0x82, 0x25, 0x8b, 0xdc, - 0x0b, 0x4a, 0xe2, 0xc6, 0x89, 0x4f, 0x10, 0xe7, 0xc0, 0x2b, 0x60, 0x68, 0xdf, 0x0b, 0x6a, 0x58, - 0xac, 0xe3, 0x48, 0xb2, 0x32, 0xd7, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x0a, 0x8c, 0x5a, 0x84, 0x56, - 0x02, 0xdb, 0x67, 0xd0, 0xe6, 0x20, 0x17, 0xbe, 0x20, 0x85, 0x47, 0xd7, 0x12, 0x16, 0xd2, 0xe5, - 0xe0, 0x55, 0x90, 0xf3, 0x03, 0xdb, 0x0b, 0xec, 0xf0, 0xc4, 0xcc, 0xce, 0x1b, 0x8b, 0xd9, 0xd2, - 0xa4, 0xd4, 0xc9, 0xed, 0x48, 0x3a, 0x52, 0x12, 0x70, 0x1e, 0xe4, 0x5e, 0x28, 0x6f, 0x6f, 0xed, - 0xe0, 0xf0, 0xc0, 0x1c, 0xe2, 0x08, 0x83, 0x4c, 0x1a, 0x29, 0x6a, 0xe1, 0x6f, 0x19, 0x60, 0xa6, - 0xa3, 0x12, 0x87, 0x14, 0x5e, 0x03, 0x39, 0x1a, 0xb2, 0x8a, 0x53, 0x3d, 0x91, 0x31, 0x79, 0x24, - 0x06, 0x2b, 0x4b, 0xfa, 0x69, 0x23, 0x3f, 0x93, 0x68, 0xc4, 0x54, 0x1e, 0x0f, 0xa5, 0x0b, 0x7f, - 0x69, 0x80, 0x0b, 0x47, 0x64, 0xef, 0xc0, 0xf3, 0x0e, 0x57, 0x1d, 0x9b, 0xb8, 0xe1, 0xaa, 0xe7, - 0xee, 0xdb, 0x55, 0x99, 0x03, 0xe8, 0x8c, 0x39, 0xf0, 0x72, 0xbb, 0xe5, 0xd2, 0x7d, 0xcd, 0x46, - 0xfe, 0x42, 0x07, 0x06, 0xea, 0xe4, 0x07, 0xdc, 0x05, 0x66, 0x25, 0xb5, 0x49, 0x64, 0x01, 0x13, - 0x65, 0x6b, 0xa4, 0x74, 0xb9, 0xd9, 0xc8, 0x9b, 0xab, 0x5d, 0x64, 0x50, 0x57, 0xed, 0xc2, 0x77, - 0x07, 0xd2, 0xe1, 0xd5, 0xd2, 0xed, 0x4d, 0x90, 0x63, 0xdb, 0xd8, 0xc2, 0x21, 0x96, 0x1b, 0xf1, - 0xf1, 0xbb, 0xdb, 0xf4, 0xa2, 0x66, 0x6c, 0x92, 0x10, 0x97, 0xa0, 0x5c, 0x10, 0x90, 0xd0, 0x90, - 0xb2, 0x0a, 0xbf, 0x01, 0x06, 0xa9, 0x4f, 0x2a, 0x32, 0xd0, 0xaf, 0x9c, 0x75, 0xb3, 0x75, 0x79, - 0x91, 0xb2, 0x4f, 0x2a, 0xc9, 0x5e, 0x60, 0x4f, 0x88, 0xc3, 0xc2, 0x77, 0x0c, 0x30, 0x44, 0x79, - 0x81, 0x92, 0x45, 0xed, 0xb5, 
0x7e, 0x79, 0x90, 0xaa, 0x82, 0xe2, 0x19, 0x49, 0xf0, 0xc2, 0xbf, - 0x33, 0x60, 0xa1, 0x9b, 0xea, 0xaa, 0xe7, 0x5a, 0x62, 0x39, 0x36, 0xe4, 0xde, 0x16, 0x99, 0xfe, - 0x94, 0xbe, 0xb7, 0x4f, 0x1b, 0xf9, 0x87, 0xee, 0x68, 0x40, 0x2b, 0x02, 0x5f, 0x50, 0xef, 0x2d, - 0x0a, 0xc5, 0x42, 0xab, 0x63, 0xa7, 0x8d, 0xfc, 0x84, 0x52, 0x6b, 0xf5, 0x15, 0xd6, 0x01, 0x74, - 0x30, 0x0d, 0x6f, 0x04, 0xd8, 0xa5, 0xc2, 0xac, 0x5d, 0x23, 0x32, 0x7c, 0x8f, 0xdc, 0x5d, 0x7a, - 0x30, 0x8d, 0xd2, 0xac, 0x84, 0x84, 0xd7, 0xdb, 0xac, 0xa1, 0x0e, 0x08, 0xac, 0x6e, 0x05, 0x04, - 0x53, 0x55, 0x8a, 0xb4, 0x13, 0x85, 0x51, 0x91, 0xe4, 0xc2, 0x87, 0xc1, 0x70, 0x8d, 0x50, 0x8a, - 0xab, 0x84, 0xd7, 0x9f, 0x91, 0xe4, 0x88, 0xde, 0x14, 0x64, 0x14, 0xf3, 0x59, 0x7f, 0x72, 0xb9, - 0x5b, 0xd4, 0xae, 0xdb, 0x34, 0x84, 0xaf, 0xb6, 0x6d, 0x80, 0xe2, 0xdd, 0xbd, 0x21, 0xd3, 0xe6, - 0xe9, 0xaf, 0x8a, 0x5f, 0x4c, 0xd1, 0x92, 0xff, 0xeb, 0x20, 0x6b, 0x87, 0xa4, 0x16, 0x9f, 0xdd, - 0x2f, 0xf7, 0x29, 0xf7, 0x4a, 0xe3, 0xd2, 0x87, 0xec, 0x06, 0x43, 0x43, 0x02, 0xb4, 0xf0, 0x9b, - 0x0c, 0x78, 0xa0, 0x9b, 0x0a, 0x3b, 0x50, 0x28, 0x8b, 0xb8, 0xef, 0x44, 0x01, 0x76, 0x64, 0xc6, - 0xa9, 0x88, 0xef, 0x70, 0x2a, 0x92, 0x5c, 0x56, 0xf2, 0xa9, 0xed, 0x56, 0x23, 0x07, 0x07, 0x32, - 0x9d, 0xd4, 0x5b, 0x97, 0x25, 0x1d, 0x29, 0x09, 0x58, 0x04, 0x80, 0x1e, 0x78, 0x41, 0xc8, 0x31, - 0x64, 0xf5, 0x3a, 0xcf, 0x0a, 0x44, 0x59, 0x51, 0x91, 0x26, 0xc1, 0x4e, 0xb4, 0x43, 0xdb, 0xb5, - 0xe4, 0xaa, 0xab, 0x5d, 0xfc, 0xa2, 0xed, 0x5a, 0x88, 0x73, 0x18, 0xbe, 0x63, 0xd3, 0x90, 0x51, - 0xe4, 0x92, 0xb7, 0x44, 0x9d, 0x4b, 0x2a, 0x09, 0x86, 0x5f, 0x61, 0x55, 0xdf, 0x0b, 0x6c, 0x42, - 0xcd, 0xa1, 0x04, 0x7f, 0x55, 0x51, 0x91, 0x26, 0x51, 0xf8, 0x57, 0xae, 0x7b, 0x92, 0xb0, 0x52, - 0x02, 0x1f, 0x04, 0xd9, 0x6a, 0xe0, 0x45, 0xbe, 0x8c, 0x92, 0x8a, 0xf6, 0xf3, 0x8c, 0x88, 0x04, - 0x8f, 0x65, 0x65, 0xbd, 0xa5, 0x4d, 0x55, 0x59, 0x19, 0x37, 0xa7, 0x31, 0x1f, 0x7e, 0xdb, 0x00, - 0x59, 0x57, 0x06, 0x87, 0xa5, 0xdc, 0xab, 0x7d, 0xca, 0x0b, 0x1e, 0xde, 0xc4, 0x5d, 0x11, 0x79, - 0x81, 0x0c, 0x9f, 0x04, 0x59, 0x5a, 0xf1, 0x7c, 0x22, 0xa3, 0x3e, 0x17, 0x0b, 0x95, 0x19, 0xf1, - 0xb4, 0x91, 0x1f, 0x8f, 0xcd, 0x71, 0x02, 0x12, 0xc2, 0xf0, 0x7b, 0x06, 0x00, 0x75, 0xec, 0xd8, - 0x16, 0xe6, 0x2d, 0x43, 0x96, 0xbb, 0xdf, 0xdb, 0xb4, 0x7e, 0x49, 0x99, 0x17, 0x8b, 0x96, 0x3c, - 0x23, 0x0d, 0x1a, 0xbe, 0x6b, 0x80, 0x31, 0x1a, 0xed, 0x05, 0x52, 0x8b, 0xf2, 0xe6, 0x62, 0x74, - 0xf9, 0xab, 0x3d, 0xf5, 0xa5, 0xac, 0x01, 0x94, 0x26, 0x9b, 0x8d, 0xfc, 0x98, 0x4e, 0x41, 0x2d, - 0x0e, 0xc0, 0x1f, 0x1a, 0x20, 0x57, 0x8f, 0xcf, 0xec, 0x61, 0xbe, 0xe1, 0x5f, 0xef, 0xd3, 0xc2, - 0xca, 0x8c, 0x4a, 0x76, 0x81, 0xea, 0x03, 0x94, 0x07, 0xf0, 0x4f, 0x06, 0x30, 0xb1, 0x25, 0x0a, - 0x3c, 0x76, 0x76, 0x02, 0xdb, 0x0d, 0x49, 0x20, 0xfa, 0x4d, 0x6a, 0xe6, 0xb8, 0x7b, 0xbd, 0x3d, - 0x0b, 0xd3, 0xbd, 0x6c, 0x69, 0x5e, 0x7a, 0x67, 0xae, 0x74, 0x71, 0x03, 0x75, 0x75, 0x90, 0x27, - 0x5a, 0xd2, 0xd2, 0x98, 0x23, 0x7d, 0x48, 0xb4, 0xa4, 0x97, 0x92, 0xd5, 0x21, 0xe9, 0xa0, 0x34, - 0x68, 0xb8, 0x0d, 0xa6, 0xfd, 0x80, 0x70, 0x80, 0x9b, 0xee, 0xa1, 0xeb, 0x1d, 0xb9, 0xd7, 0x6c, - 0xe2, 0x58, 0xd4, 0x04, 0xf3, 0xc6, 0x62, 0xae, 0x74, 0xa9, 0xd9, 0xc8, 0x4f, 0xef, 0x74, 0x12, - 0x40, 0x9d, 0xf5, 0x0a, 0xef, 0x0e, 0xa4, 0x6f, 0x01, 0xe9, 0x2e, 0x02, 0xbe, 0x2f, 0xde, 0x5e, - 0xc4, 0x86, 0x9a, 0x06, 0x5f, 0xad, 0x37, 0xfb, 0x94, 0x4c, 0xaa, 0x0d, 0x48, 0x3a, 0x39, 0x45, - 0xa2, 0x48, 0xf3, 0x03, 0xfe, 0xcc, 0x00, 0xe3, 0xb8, 0x52, 0x21, 0x7e, 0x48, 0x2c, 0x51, 0xdc, - 0x33, 0x9f, 0x41, 0xfd, 0x9a, 0x96, 0x5e, 0x8d, 0xaf, 
0xe8, 0xd0, 0xa8, 0xd5, 0x13, 0xf8, 0x2c, - 0x38, 0x4f, 0x43, 0x2f, 0x20, 0x56, 0xaa, 0x6d, 0x86, 0xcd, 0x46, 0xfe, 0x7c, 0xb9, 0x85, 0x83, - 0x52, 0x92, 0x85, 0xbf, 0x67, 0x41, 0xfe, 0x0e, 0x5b, 0xed, 0x2e, 0x2e, 0x66, 0x57, 0xc0, 0x10, - 0x7f, 0x5d, 0x8b, 0x47, 0x25, 0xa7, 0xb5, 0x82, 0x9c, 0x8a, 0x24, 0x97, 0x1d, 0x14, 0x0c, 0x9f, - 0xb5, 0x2f, 0x03, 0x5c, 0x50, 0x1d, 0x14, 0x65, 0x41, 0x46, 0x31, 0x1f, 0x2e, 0x03, 0x60, 0x11, - 0x3f, 0x20, 0xec, 0xb0, 0xb2, 0xcc, 0x61, 0x2e, 0xad, 0x16, 0x69, 0x4d, 0x71, 0x90, 0x26, 0x05, - 0xaf, 0x01, 0x18, 0x3f, 0xd9, 0x9e, 0xfb, 0x32, 0x0e, 0x5c, 0xdb, 0xad, 0x9a, 0x39, 0xee, 0xf6, - 0x0c, 0xeb, 0xc6, 0xd6, 0xda, 0xb8, 0xa8, 0x83, 0x06, 0x7c, 0x1b, 0x0c, 0x89, 0xa1, 0x0f, 0x3f, - 0x21, 0xfa, 0x58, 0xe5, 0x01, 0x8f, 0x11, 0x87, 0x42, 0x12, 0xb2, 0xbd, 0xba, 0x67, 0xef, 0x75, - 0x75, 0xbf, 0x6d, 0x39, 0x1d, 0xfa, 0x1f, 0x2f, 0xa7, 0x85, 0xff, 0x18, 0xe9, 0x9a, 0xa3, 0xbd, - 0x6a, 0xb9, 0x82, 0x1d, 0x02, 0xd7, 0xc0, 0x24, 0xbb, 0x31, 0x21, 0xe2, 0x3b, 0x76, 0x05, 0x53, - 0x7e, 0x61, 0x17, 0xc9, 0xae, 0x66, 0x48, 0xe5, 0x14, 0x1f, 0xb5, 0x69, 0xc0, 0x17, 0x00, 0x14, - 0xb7, 0x88, 0x16, 0x3b, 0xa2, 0x21, 0x52, 0xf7, 0x81, 0x72, 0x9b, 0x04, 0xea, 0xa0, 0x05, 0x57, - 0xc1, 0x94, 0x83, 0xf7, 0x88, 0x53, 0x26, 0x0e, 0xa9, 0x84, 0x5e, 0xc0, 0x4d, 0x89, 0x91, 0xc6, - 0x74, 0xb3, 0x91, 0x9f, 0xba, 0x9e, 0x66, 0xa2, 0x76, 0xf9, 0xc2, 0x42, 0x7a, 0x6b, 0xeb, 0x2f, - 0x2e, 0xee, 0x66, 0x1f, 0x64, 0xc0, 0x6c, 0xf7, 0xcc, 0x80, 0xdf, 0x49, 0xae, 0x90, 0xe2, 0x86, - 0xf0, 0x7a, 0xbf, 0xb2, 0x50, 0xde, 0x21, 0x41, 0xfb, 0xfd, 0x11, 0x7e, 0x93, 0xb5, 0x6b, 0xd8, - 0x89, 0x87, 0x56, 0xaf, 0xf5, 0xcd, 0x05, 0x06, 0x52, 0x1a, 0x11, 0x9d, 0x20, 0x76, 0x78, 0xe3, - 0x87, 0x1d, 0x52, 0xf8, 0xad, 0x91, 0x9e, 0x22, 0x24, 0x3b, 0x18, 0xfe, 0xc8, 0x00, 0x13, 0x9e, - 0x4f, 0xdc, 0x95, 0x9d, 0x8d, 0x97, 0x3e, 0x27, 0x76, 0xb2, 0x0c, 0xd5, 0xd6, 0x19, 0xfd, 0x7c, - 0xa1, 0xbc, 0xbd, 0x25, 0x0c, 0xee, 0x04, 0x9e, 0x4f, 0x4b, 0x17, 0x9a, 0x8d, 0xfc, 0xc4, 0x76, - 0x2b, 0x14, 0x4a, 0x63, 0x17, 0x6a, 0x60, 0x7a, 0xfd, 0x38, 0x24, 0x81, 0x8b, 0x9d, 0x35, 0xaf, - 0x12, 0xd5, 0x88, 0x1b, 0x0a, 0x47, 0x53, 0x13, 0x2f, 0xe3, 0x2e, 0x27, 0x5e, 0x0f, 0x80, 0x81, - 0x28, 0x70, 0x64, 0x16, 0x8f, 0xaa, 0x89, 0x2e, 0xba, 0x8e, 0x18, 0xbd, 0xb0, 0x00, 0x06, 0x99, - 0x9f, 0xf0, 0x12, 0x18, 0x08, 0xf0, 0x11, 0xb7, 0x3a, 0x56, 0x1a, 0x66, 0x22, 0x08, 0x1f, 0x21, - 0x46, 0x2b, 0xfc, 0x75, 0x01, 0x4c, 0xa4, 0xde, 0x05, 0xce, 0x82, 0x8c, 0x1a, 0x13, 0x03, 0x69, - 0x34, 0xb3, 0xb1, 0x86, 0x32, 0xb6, 0x05, 0x9f, 0x56, 0xc5, 0x57, 0x80, 0xe6, 0xd5, 0x59, 0xc2, - 0xa9, 0xac, 0x3f, 0x4f, 0xcc, 0x31, 0x47, 0xe2, 0xc2, 0xc9, 0x7c, 0x20, 0xfb, 0x72, 0x97, 0x08, - 0x1f, 0xc8, 0x3e, 0x62, 0xb4, 0x4f, 0x3b, 0xee, 0x8b, 0xe7, 0x8d, 0xd9, 0xbb, 0x98, 0x37, 0x0e, - 0xdd, 0x76, 0xde, 0xf8, 0x20, 0xc8, 0x86, 0x76, 0xe8, 0x10, 0x7e, 0x90, 0x69, 0xd7, 0xa8, 0x1b, - 0x8c, 0x88, 0x04, 0x0f, 0xbe, 0x05, 0x86, 0x2d, 0xb2, 0x8f, 0x23, 0x27, 0xe4, 0x67, 0xd6, 0xe8, - 0xf2, 0x6a, 0x0f, 0x52, 0x48, 0x0c, 0x83, 0xd7, 0x84, 0x5d, 0x14, 0x03, 0xc0, 0x87, 0xc0, 0x70, - 0x0d, 0x1f, 0xdb, 0xb5, 0xa8, 0xc6, 0x1b, 0x4c, 0x43, 0x88, 0x6d, 0x0a, 0x12, 0x8a, 0x79, 0xac, - 0x32, 0x92, 0xe3, 0x8a, 0x13, 0x51, 0xbb, 0x4e, 0x24, 0x53, 0x36, 0x7f, 0xaa, 0x32, 0xae, 0xa7, - 0xf8, 0xa8, 0x4d, 0x83, 0x83, 0xd9, 0x2e, 0x57, 0x1e, 0xd5, 0xc0, 0x04, 0x09, 0xc5, 0xbc, 0x56, - 0x30, 0x29, 0x3f, 0xd6, 0x0d, 0x4c, 0x2a, 0xb7, 0x69, 0xc0, 0x47, 0xc1, 0x48, 0x0d, 0x1f, 0x5f, - 0x27, 0x6e, 0x35, 0x3c, 0x30, 0xc7, 0xe7, 0x8d, 0xc5, 0x81, 0xd2, 0x78, 0xb3, 
0x91, 0x1f, 0xd9, - 0x8c, 0x89, 0x28, 0xe1, 0x73, 0x61, 0xdb, 0x95, 0xc2, 0xe7, 0x35, 0xe1, 0x98, 0x88, 0x12, 0x3e, - 0xeb, 0x5e, 0x7c, 0x1c, 0xb2, 0xcd, 0x65, 0x4e, 0xb4, 0x5e, 0x73, 0x77, 0x04, 0x19, 0xc5, 0x7c, - 0xb8, 0x08, 0x72, 0x35, 0x7c, 0xcc, 0x47, 0x12, 0xe6, 0x24, 0x37, 0xcb, 0x07, 0xe3, 0x9b, 0x92, - 0x86, 0x14, 0x97, 0x4b, 0xda, 0xae, 0x90, 0x9c, 0xd2, 0x24, 0x25, 0x0d, 0x29, 0x2e, 0x4b, 0xe2, - 0xc8, 0xb5, 0x6f, 0x45, 0x44, 0x08, 0x43, 0x1e, 0x19, 0x95, 0xc4, 0x37, 0x13, 0x16, 0xd2, 0xe5, - 0x60, 0x11, 0x80, 0x5a, 0xe4, 0x84, 0xb6, 0xef, 0x90, 0xed, 0x7d, 0xf3, 0x02, 0x8f, 0x3f, 0x6f, - 0xfa, 0x37, 0x15, 0x15, 0x69, 0x12, 0x90, 0x80, 0x41, 0xe2, 0x46, 0x35, 0xf3, 0x22, 0x3f, 0xd8, - 0x7b, 0x92, 0x82, 0x6a, 0xe7, 0xac, 0xbb, 0x51, 0x0d, 0x71, 0xf3, 0xf0, 0x69, 0x30, 0x5e, 0xc3, - 0xc7, 0xac, 0x1c, 0x90, 0x20, 0xb4, 0x09, 0x35, 0xa7, 0xf9, 0xcb, 0x4f, 0xb1, 0x6e, 0x77, 0x53, - 0x67, 0xa0, 0x56, 0x39, 0xae, 0x68, 0xbb, 0x9a, 0xe2, 0x8c, 0xa6, 0xa8, 0x33, 0x50, 0xab, 0x1c, - 0x8b, 0x74, 0x40, 0x6e, 0x45, 0x76, 0x40, 0x2c, 0xf3, 0x3e, 0xde, 0x20, 0xcb, 0x8f, 0x15, 0x82, - 0x86, 0x14, 0x17, 0xd6, 0xe3, 0xd9, 0x95, 0xc9, 0xb7, 0xe1, 0xcd, 0xde, 0x56, 0xf2, 0xed, 0x60, - 0x25, 0x08, 0xf0, 0x89, 0x38, 0x69, 0xf4, 0xa9, 0x15, 0xa4, 0x20, 0x8b, 0x1d, 0x67, 0x7b, 0xdf, - 0xbc, 0xc4, 0x63, 0xdf, 0xeb, 0x13, 0x44, 0x55, 0x9d, 0x15, 0x06, 0x82, 0x04, 0x16, 0x03, 0xf5, - 0x5c, 0x96, 0x1a, 0xb3, 0xfd, 0x05, 0xdd, 0x66, 0x20, 0x48, 0x60, 0xf1, 0x37, 0x75, 0x4f, 0xb6, - 0xf7, 0xcd, 0xfb, 0xfb, 0xfc, 0xa6, 0x0c, 0x04, 0x09, 0x2c, 0x68, 0x83, 0x01, 0xd7, 0x0b, 0xcd, - 0xcb, 0x7d, 0x39, 0x9e, 0xf9, 0x81, 0xb3, 0xe5, 0x85, 0x88, 0x61, 0xc0, 0x9f, 0x1a, 0x00, 0xf8, - 0x49, 0x8a, 0x3e, 0xd0, 0x93, 0x91, 0x48, 0x0a, 0xb2, 0x98, 0xe4, 0xf6, 0xba, 0x1b, 0x06, 0x27, - 0xc9, 0xf5, 0x48, 0xdb, 0x03, 0x9a, 0x17, 0xf0, 0x57, 0x06, 0xb8, 0xa8, 0xb7, 0xc9, 0xca, 0xbd, - 0x39, 0x1e, 0x91, 0x1b, 0xbd, 0x4e, 0xf3, 0x92, 0xe7, 0x39, 0x25, 0xb3, 0xd9, 0xc8, 0x5f, 0x5c, - 0xe9, 0x80, 0x8a, 0x3a, 0xfa, 0x02, 0x7f, 0x67, 0x80, 0x29, 0x59, 0x45, 0x35, 0x0f, 0xf3, 0x3c, - 0x80, 0xa4, 0xd7, 0x01, 0x4c, 0xe3, 0x88, 0x38, 0xaa, 0x8f, 0xec, 0x6d, 0x7c, 0xd4, 0xee, 0x1a, - 0xfc, 0xa3, 0x01, 0xc6, 0x2c, 0xe2, 0x13, 0xd7, 0x22, 0x6e, 0x85, 0xf9, 0x3a, 0xdf, 0x93, 0x91, - 0x45, 0xda, 0xd7, 0x35, 0x0d, 0x42, 0xb8, 0x59, 0x94, 0x6e, 0x8e, 0xe9, 0xac, 0xd3, 0x46, 0x7e, - 0x26, 0x51, 0xd5, 0x39, 0xa8, 0xc5, 0x4b, 0xf8, 0x9e, 0x01, 0x26, 0x92, 0x05, 0x10, 0x47, 0xca, - 0x42, 0x1f, 0xf3, 0x80, 0xb7, 0xaf, 0x2b, 0xad, 0x80, 0x28, 0xed, 0x01, 0xfc, 0xbd, 0xc1, 0x3a, - 0xb5, 0xf8, 0xde, 0x47, 0xcd, 0x02, 0x8f, 0xe5, 0x1b, 0x3d, 0x8f, 0xa5, 0x42, 0x10, 0xa1, 0xbc, - 0x9a, 0xb4, 0x82, 0x8a, 0x73, 0xda, 0xc8, 0x4f, 0xeb, 0x91, 0x54, 0x0c, 0xa4, 0x7b, 0x08, 0x7f, - 0x60, 0x80, 0x31, 0x92, 0x74, 0xdc, 0xd4, 0x7c, 0xb0, 0x27, 0x41, 0xec, 0xd8, 0xc4, 0x8b, 0x9b, - 0xba, 0xc6, 0xa2, 0xa8, 0x05, 0x9b, 0x75, 0x90, 0xe4, 0x18, 0xd7, 0x7c, 0x87, 0x98, 0xff, 0xd7, - 0xe3, 0x0e, 0x72, 0x5d, 0xd8, 0x45, 0x31, 0x00, 0xbc, 0x0a, 0x72, 0x6e, 0xe4, 0x38, 0x78, 0xcf, - 0x21, 0xe6, 0x43, 0xbc, 0x17, 0x51, 0x23, 0xd9, 0x2d, 0x49, 0x47, 0x4a, 0x02, 0xee, 0x83, 0xf9, - 0xe3, 0x17, 0xd5, 0xdf, 0x93, 0x3a, 0x0e, 0x0d, 0xcd, 0x2b, 0xdc, 0xca, 0x6c, 0xb3, 0x91, 0x9f, - 0xd9, 0xed, 0x3c, 0x56, 0xbc, 0xa3, 0x0d, 0xf8, 0x0a, 0xb8, 0x5f, 0x93, 0x59, 0xaf, 0xed, 0x11, - 0xcb, 0x22, 0x56, 0x7c, 0x71, 0x33, 0xff, 0x5f, 0x0c, 0x2e, 0xe3, 0x0d, 0xbe, 0x9b, 0x16, 0x40, - 0xb7, 0xd3, 0x86, 0xd7, 0xc1, 0x8c, 0xc6, 0xde, 0x70, 0xc3, 0xed, 0xa0, 0x1c, 0x06, 0xb6, 0x5b, - 0x35, 
0x17, 0xb9, 0xdd, 0x8b, 0xf1, 0x8e, 0xdc, 0xd5, 0x78, 0xa8, 0x8b, 0x0e, 0xfc, 0x72, 0x8b, - 0x35, 0xfe, 0x09, 0x0d, 0xfb, 0x2f, 0x92, 0x13, 0x6a, 0x3e, 0xcc, 0xbb, 0x13, 0xbe, 0xd8, 0xbb, - 0x1a, 0x1d, 0x75, 0x91, 0x87, 0x5f, 0x02, 0x17, 0x52, 0x1c, 0x76, 0x45, 0x31, 0x1f, 0x11, 0x77, - 0x0d, 0xd6, 0xcf, 0xee, 0xc6, 0x44, 0xd4, 0x49, 0x12, 0x7e, 0x11, 0x40, 0x8d, 0xbc, 0x89, 0x7d, - 0xae, 0xff, 0xa8, 0xb8, 0xf6, 0xb0, 0x15, 0xdd, 0x95, 0x34, 0xd4, 0x41, 0x0e, 0xfe, 0xdc, 0x68, - 0x79, 0x93, 0xe4, 0x76, 0x4c, 0xcd, 0xab, 0x7c, 0xff, 0x6e, 0x9e, 0x31, 0x0b, 0xb5, 0xef, 0x20, - 0x91, 0x43, 0xb4, 0x30, 0x6b, 0x50, 0xa8, 0x8b, 0x0b, 0xb3, 0xec, 0x86, 0x9e, 0xaa, 0xf0, 0x70, - 0x12, 0x0c, 0x1c, 0x12, 0xf9, 0xaf, 0x0a, 0xc4, 0x7e, 0x42, 0x0b, 0x64, 0xeb, 0xd8, 0x89, 0xe2, - 0x21, 0x43, 0x8f, 0xbb, 0x03, 0x24, 0x8c, 0x3f, 0x9b, 0x79, 0xc6, 0x98, 0x7d, 0xdf, 0x00, 0x33, - 0x9d, 0x0f, 0x9e, 0x7b, 0xea, 0xd6, 0x2f, 0x0c, 0x30, 0xd5, 0x76, 0xc6, 0x74, 0xf0, 0xe8, 0x56, - 0xab, 0x47, 0xaf, 0xf4, 0xfa, 0xb0, 0x10, 0x9b, 0x83, 0x77, 0xc8, 0xba, 0x7b, 0x3f, 0x36, 0xc0, - 0x64, 0xba, 0x6c, 0xdf, 0xcb, 0x78, 0x15, 0xde, 0xcf, 0x80, 0x99, 0xce, 0x8d, 0x3d, 0x0c, 0xd4, - 0x04, 0xa3, 0x3f, 0x93, 0xa0, 0x4e, 0x53, 0xe3, 0x77, 0x0c, 0x30, 0xfa, 0x96, 0x92, 0x8b, 0xbf, - 0xba, 0xf7, 0x7c, 0x06, 0x15, 0x9f, 0x93, 0x09, 0x83, 0x22, 0x1d, 0xb7, 0xf0, 0x07, 0x03, 0x4c, - 0x77, 0x6c, 0x00, 0xe0, 0x15, 0x30, 0x84, 0x1d, 0xc7, 0x3b, 0x12, 0xa3, 0x44, 0xed, 0x1b, 0xc1, - 0x0a, 0xa7, 0x22, 0xc9, 0xd5, 0xa2, 0x97, 0xf9, 0xac, 0xa2, 0x57, 0xf8, 0xb3, 0x01, 0x2e, 0xdf, - 0x2e, 0x13, 0xef, 0xc9, 0x92, 0x2e, 0x82, 0x9c, 0x6c, 0xde, 0x4f, 0xf8, 0x72, 0xca, 0x52, 0x2c, - 0x8b, 0x06, 0xff, 0xa3, 0x99, 0xf8, 0x55, 0xf8, 0xc0, 0x00, 0x93, 0x65, 0x12, 0xd4, 0xed, 0x0a, - 0x41, 0x64, 0x9f, 0x04, 0xc4, 0xad, 0x10, 0xb8, 0x04, 0x46, 0xf8, 0xe7, 0x6e, 0x1f, 0x57, 0xe2, - 0x4f, 0x37, 0x53, 0x32, 0xe4, 0x23, 0x5b, 0x31, 0x03, 0x25, 0x32, 0xea, 0x33, 0x4f, 0xa6, 0xeb, - 0x67, 0x9e, 0xcb, 0x60, 0xd0, 0x4f, 0x06, 0xd1, 0x39, 0xc6, 0xe5, 0xb3, 0x67, 0x4e, 0xe5, 0x5c, - 0x2f, 0x08, 0xf9, 0x74, 0x2d, 0x2b, 0xb9, 0x5e, 0x10, 0x22, 0x4e, 0x2d, 0xbc, 0x06, 0xce, 0xb7, - 0x96, 0x71, 0x86, 0x17, 0x44, 0x4e, 0xdb, 0x67, 0x25, 0xc6, 0x43, 0x9c, 0xa3, 0xff, 0xdb, 0x25, - 0x73, 0x87, 0x7f, 0xbb, 0xfc, 0xc5, 0x00, 0x9d, 0xfe, 0x71, 0x06, 0x2f, 0x89, 0xf9, 0xa5, 0x36, - 0x14, 0x8c, 0x67, 0x97, 0xb0, 0x0e, 0x86, 0xa9, 0x08, 0x9a, 0x5c, 0xd4, 0xed, 0x33, 0x2e, 0x6a, - 0x7a, 0x09, 0x44, 0xe3, 0x14, 0x53, 0x63, 0x30, 0xb6, 0xae, 0x15, 0x5c, 0x8a, 0x5c, 0x4b, 0x8e, - 0xb4, 0xc7, 0xc4, 0xba, 0xae, 0xae, 0x08, 0x1a, 0x52, 0xdc, 0x52, 0xe5, 0xc3, 0x4f, 0xe6, 0xce, - 0x7d, 0xf4, 0xc9, 0xdc, 0xb9, 0x8f, 0x3f, 0x99, 0x3b, 0xf7, 0xad, 0xe6, 0x9c, 0xf1, 0x61, 0x73, - 0xce, 0xf8, 0xa8, 0x39, 0x67, 0x7c, 0xdc, 0x9c, 0x33, 0xfe, 0xd1, 0x9c, 0x33, 0x7e, 0xf2, 0xcf, - 0xb9, 0x73, 0x5f, 0x7b, 0xee, 0x4c, 0x7f, 0xf2, 0xfe, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, - 0x02, 0xc9, 0x93, 0x3d, 0x2e, 0x00, 0x00, + // 3105 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x23, 0x47, + 0xd9, 0xdf, 0x91, 0x2c, 0x5b, 0x6e, 0x7b, 0x77, 0xed, 0xde, 0xb5, 0x33, 0xeb, 0x6c, 0x2c, 0x5b, + 0x79, 0xb3, 0xaf, 0x93, 0x6c, 0xe4, 0xc4, 0x6f, 0xf2, 0x26, 0xa4, 0x48, 0x51, 0x96, 0xed, 0x0d, + 0x4e, 0xd6, 0x1f, 0xb4, 0x76, 0x13, 0x43, 0x3e, 0xc7, 0x9a, 0xb6, 0x3c, 0xf1, 0x68, 0x66, 0xb6, + 0x7b, 0x46, 0xb6, 0x2b, 0x40, 0xf1, 0x51, 0x29, 0x28, 0x0a, 0x08, 0x45, 0x72, 0xa1, 0x80, 0x43, + 0xa0, 0xe0, 0xc0, 0x01, 0x0e, 0x70, 
0x83, 0x3f, 0x20, 0xc7, 0x14, 0xc5, 0x21, 0x07, 0x4a, 0x10, + 0x71, 0xe5, 0x48, 0x15, 0x55, 0x3e, 0x51, 0xfd, 0x31, 0x3d, 0xad, 0x91, 0xb4, 0xbb, 0x15, 0x4b, + 0x59, 0x6e, 0xd2, 0xf3, 0xf5, 0x7b, 0xe6, 0xe9, 0xa7, 0x9f, 0x7e, 0xfa, 0x99, 0x01, 0x7b, 0x07, + 0xcf, 0xd0, 0x92, 0xe3, 0x2f, 0x1e, 0x44, 0xbb, 0x98, 0x78, 0x38, 0xc4, 0x74, 0xb1, 0x81, 0x3d, + 0xdb, 0x27, 0x8b, 0x92, 0x61, 0x05, 0x0e, 0x3e, 0x0a, 0xb1, 0x47, 0x1d, 0xdf, 0xa3, 0x8f, 0x59, + 0x81, 0x43, 0x31, 0x69, 0x60, 0xb2, 0x18, 0x1c, 0xd4, 0x18, 0x8f, 0xb6, 0x0b, 0x2c, 0x36, 0x9e, + 0xd8, 0xc5, 0xa1, 0xf5, 0xc4, 0x62, 0x0d, 0x7b, 0x98, 0x58, 0x21, 0xb6, 0x4b, 0x01, 0xf1, 0x43, + 0x1f, 0x3e, 0x27, 0xcc, 0x95, 0xda, 0xa4, 0xdf, 0x50, 0xe6, 0x4a, 0xc1, 0x41, 0x8d, 0xf1, 0x68, + 0xbb, 0x40, 0x49, 0x9a, 0x9b, 0x79, 0xac, 0xe6, 0x84, 0xfb, 0xd1, 0x6e, 0xa9, 0xea, 0xd7, 0x17, + 0x6b, 0x7e, 0xcd, 0x5f, 0xe4, 0x56, 0x77, 0xa3, 0x3d, 0xfe, 0x8f, 0xff, 0xe1, 0xbf, 0x04, 0xda, + 0xcc, 0x93, 0x89, 0xf3, 0x75, 0xab, 0xba, 0xef, 0x78, 0x98, 0x1c, 0x27, 0x1e, 0xd7, 0x71, 0x68, + 0x2d, 0x36, 0x3a, 0x7c, 0x9c, 0x59, 0xec, 0xa5, 0x45, 0x22, 0x2f, 0x74, 0xea, 0xb8, 0x43, 0xe1, + 0xff, 0xef, 0xa4, 0x40, 0xab, 0xfb, 0xb8, 0x6e, 0xa5, 0xf5, 0x8a, 0x27, 0x06, 0x98, 0x5c, 0xf1, + 0xbd, 0x06, 0x26, 0xec, 0x29, 0x11, 0xbe, 0x15, 0x61, 0x1a, 0xc2, 0x32, 0xc8, 0x46, 0x8e, 0x6d, + 0x1a, 0x73, 0xc6, 0xc2, 0x68, 0xf9, 0xf1, 0x0f, 0x9b, 0x85, 0x33, 0xad, 0x66, 0x21, 0x7b, 0x73, + 0x7d, 0xf5, 0xa4, 0x59, 0x98, 0xef, 0x85, 0x14, 0x1e, 0x07, 0x98, 0x96, 0x6e, 0xae, 0xaf, 0x22, + 0xa6, 0x0c, 0x9f, 0x07, 0x93, 0x36, 0xa6, 0x0e, 0xc1, 0xf6, 0xf2, 0xf6, 0xfa, 0x4b, 0xc2, 0xbe, + 0x99, 0xe1, 0x16, 0x2f, 0x49, 0x8b, 0x93, 0xab, 0x69, 0x01, 0xd4, 0xa9, 0x03, 0x77, 0xc0, 0x88, + 0xbf, 0xfb, 0x16, 0xae, 0x86, 0xd4, 0xcc, 0xce, 0x65, 0x17, 0xc6, 0x96, 0x1e, 0x2b, 0x25, 0x2b, + 0xa8, 0x5c, 0xe0, 0xcb, 0x26, 0x1f, 0xb6, 0x84, 0xac, 0xc3, 0xb5, 0x78, 0xe5, 0xca, 0xe7, 0x25, + 0xda, 0xc8, 0x96, 0xb0, 0x82, 0x62, 0x73, 0xc5, 0x5f, 0x66, 0x00, 0xd4, 0x1f, 0x9e, 0x06, 0xbe, + 0x47, 0x71, 0x5f, 0x9e, 0x9e, 0x82, 0x89, 0x2a, 0xb7, 0x1c, 0x62, 0x5b, 0xe2, 0x9a, 0x99, 0x4f, + 0xe3, 0xbd, 0x29, 0xf1, 0x27, 0x56, 0x52, 0xe6, 0x50, 0x07, 0x00, 0xbc, 0x01, 0x86, 0x09, 0xa6, + 0x91, 0x1b, 0x9a, 0xd9, 0x39, 0x63, 0x61, 0x6c, 0xe9, 0x6a, 0x4f, 0x28, 0x9e, 0xdf, 0x2c, 0xf9, + 0x4a, 0x8d, 0x27, 0x4a, 0x95, 0xd0, 0x0a, 0x23, 0x5a, 0x3e, 0x27, 0x91, 0x86, 0x11, 0xb7, 0x81, + 0xa4, 0xad, 0xe2, 0x77, 0x33, 0x60, 0x42, 0x8f, 0x52, 0xc3, 0xc1, 0x87, 0xf0, 0x10, 0x8c, 0x10, + 0x91, 0x2c, 0x3c, 0x4e, 0x63, 0x4b, 0xdb, 0xa5, 0x53, 0x6d, 0xab, 0x52, 0x47, 0x12, 0x96, 0xc7, + 0xd8, 0x9a, 0xc9, 0x3f, 0x28, 0x46, 0x83, 0x6f, 0x83, 0x3c, 0x91, 0x0b, 0xc5, 0xb3, 0x69, 0x6c, + 0xe9, 0x4b, 0x7d, 0x44, 0x16, 0x86, 0xcb, 0xe3, 0xad, 0x66, 0x21, 0x1f, 0xff, 0x43, 0x0a, 0xb0, + 0xf8, 0x5e, 0x06, 0xcc, 0xae, 0x44, 0x34, 0xf4, 0xeb, 0x08, 0x53, 0x3f, 0x22, 0x55, 0xbc, 0xe2, + 0xbb, 0x51, 0xdd, 0x5b, 0xc5, 0x7b, 0x8e, 0xe7, 0x84, 0x2c, 0x5b, 0xe7, 0xc0, 0x90, 0x67, 0xd5, + 0xb1, 0xcc, 0x9e, 0x71, 0x19, 0xd3, 0xa1, 0x4d, 0xab, 0x8e, 0x11, 0xe7, 0x30, 0x09, 0x96, 0x2c, + 0x72, 0x2f, 0x28, 0x89, 0x1b, 0xc7, 0x01, 0x46, 0x9c, 0x03, 0xaf, 0x80, 0xe1, 0x3d, 0x9f, 0xd4, + 0x2d, 0xb1, 0x8e, 0xa3, 0xc9, 0xca, 0x5c, 0xe3, 0x54, 0x24, 0xb9, 0xf0, 0x29, 0x30, 0x66, 0x63, + 0x5a, 0x25, 0x4e, 0xc0, 0xa0, 0xcd, 0x21, 0x2e, 0x7c, 0x41, 0x0a, 0x8f, 0xad, 0x26, 0x2c, 0xa4, + 0xcb, 0xc1, 0xab, 0x20, 0x1f, 0x10, 0xc7, 0x27, 0x4e, 0x78, 0x6c, 0xe6, 0xe6, 0x8c, 0x85, 0x5c, + 0x79, 0x42, 0xea, 0xe4, 0xb7, 0x25, 0x1d, 0x29, 0x09, 0x38, 
0x07, 0xf2, 0x2f, 0x54, 0xb6, 0x36, + 0xb7, 0xad, 0x70, 0xdf, 0x1c, 0xe6, 0x08, 0x43, 0x4c, 0x1a, 0x29, 0x6a, 0xf1, 0xaf, 0x19, 0x60, + 0xa6, 0xa3, 0x12, 0x87, 0x14, 0x5e, 0x03, 0x79, 0x1a, 0xb2, 0x8a, 0x53, 0x3b, 0x96, 0x31, 0x79, + 0x24, 0x06, 0xab, 0x48, 0xfa, 0x49, 0xb3, 0x30, 0x9d, 0x68, 0xc4, 0x54, 0x1e, 0x0f, 0xa5, 0x0b, + 0x7f, 0x6e, 0x80, 0x0b, 0x87, 0x78, 0x77, 0xdf, 0xf7, 0x0f, 0x56, 0x5c, 0x07, 0x7b, 0xe1, 0x8a, + 0xef, 0xed, 0x39, 0x35, 0x99, 0x03, 0xe8, 0x94, 0x39, 0xf0, 0x72, 0xa7, 0xe5, 0xf2, 0x7d, 0xad, + 0x66, 0xe1, 0x42, 0x17, 0x06, 0xea, 0xe6, 0x07, 0xdc, 0x01, 0x66, 0x35, 0xb5, 0x49, 0x64, 0x01, + 0x13, 0x65, 0x6b, 0xb4, 0x7c, 0xb9, 0xd5, 0x2c, 0x98, 0x2b, 0x3d, 0x64, 0x50, 0x4f, 0xed, 0xe2, + 0xb7, 0xb3, 0xe9, 0xf0, 0x6a, 0xe9, 0xf6, 0x26, 0xc8, 0xb3, 0x6d, 0x6c, 0x5b, 0xa1, 0x25, 0x37, + 0xe2, 0xe3, 0x77, 0xb7, 0xe9, 0x45, 0xcd, 0xd8, 0xc0, 0xa1, 0x55, 0x86, 0x72, 0x41, 0x40, 0x42, + 0x43, 0xca, 0x2a, 0xfc, 0x1a, 0x18, 0xa2, 0x01, 0xae, 0xca, 0x40, 0xbf, 0x72, 0xda, 0xcd, 0xd6, + 0xe3, 0x41, 0x2a, 0x01, 0xae, 0x26, 0x7b, 0x81, 0xfd, 0x43, 0x1c, 0x16, 0xbe, 0x63, 0x80, 0x61, + 0xca, 0x0b, 0x94, 0x2c, 0x6a, 0xaf, 0x0d, 0xca, 0x83, 0x54, 0x15, 0x14, 0xff, 0x91, 0x04, 0x2f, + 0xfe, 0x2b, 0x03, 0xe6, 0x7b, 0xa9, 0xae, 0xf8, 0x9e, 0x2d, 0x96, 0x63, 0x5d, 0xee, 0x6d, 0x91, + 0xe9, 0x4f, 0xe9, 0x7b, 0xfb, 0xa4, 0x59, 0x78, 0xe8, 0x8e, 0x06, 0xb4, 0x22, 0xf0, 0x39, 0xf5, + 0xdc, 0xa2, 0x50, 0xcc, 0xb7, 0x3b, 0x76, 0xd2, 0x2c, 0x9c, 0x57, 0x6a, 0xed, 0xbe, 0xc2, 0x06, + 0x80, 0xae, 0x45, 0xc3, 0x1b, 0xc4, 0xf2, 0xa8, 0x30, 0xeb, 0xd4, 0xb1, 0x0c, 0xdf, 0x23, 0x77, + 0x97, 0x1e, 0x4c, 0xa3, 0x3c, 0x23, 0x21, 0xe1, 0xf5, 0x0e, 0x6b, 0xa8, 0x0b, 0x02, 0xab, 0x5b, + 0x04, 0x5b, 0x54, 0x95, 0x22, 0xed, 0x44, 0x61, 0x54, 0x24, 0xb9, 0xf0, 0x61, 0x30, 0x52, 0xc7, + 0x94, 0x5a, 0x35, 0xcc, 0xeb, 0xcf, 0x68, 0x72, 0x44, 0x6f, 0x08, 0x32, 0x8a, 0xf9, 0xac, 0x3f, + 0xb9, 0xdc, 0x2b, 0x6a, 0xd7, 0x1d, 0x1a, 0xc2, 0x57, 0x3b, 0x36, 0x40, 0xe9, 0xee, 0x9e, 0x90, + 0x69, 0xf3, 0xf4, 0x57, 0xc5, 0x2f, 0xa6, 0x68, 0xc9, 0xff, 0x55, 0x90, 0x73, 0x42, 0x5c, 0x8f, + 0xcf, 0xee, 0x97, 0x07, 0x94, 0x7b, 0xe5, 0xb3, 0xd2, 0x87, 0xdc, 0x3a, 0x43, 0x43, 0x02, 0xb4, + 0xf8, 0xab, 0x0c, 0x78, 0xa0, 0x97, 0x0a, 0x3b, 0x50, 0x28, 0x8b, 0x78, 0xe0, 0x46, 0xc4, 0x72, + 0x65, 0xc6, 0xa9, 0x88, 0x6f, 0x73, 0x2a, 0x92, 0x5c, 0x56, 0xf2, 0xa9, 0xe3, 0xd5, 0x22, 0xd7, + 0x22, 0x32, 0x9d, 0xd4, 0x53, 0x57, 0x24, 0x1d, 0x29, 0x09, 0x58, 0x02, 0x80, 0xee, 0xfb, 0x24, + 0xe4, 0x18, 0xb2, 0x7a, 0x9d, 0x63, 0x05, 0xa2, 0xa2, 0xa8, 0x48, 0x93, 0x60, 0x27, 0xda, 0x81, + 0xe3, 0xd9, 0x72, 0xd5, 0xd5, 0x2e, 0x7e, 0xd1, 0xf1, 0x6c, 0xc4, 0x39, 0x0c, 0xdf, 0x75, 0x68, + 0xc8, 0x28, 0x72, 0xc9, 0xdb, 0xa2, 0xce, 0x25, 0x95, 0x04, 0xc3, 0xaf, 0xb2, 0xaa, 0xef, 0x13, + 0x07, 0x53, 0x73, 0x38, 0xc1, 0x5f, 0x51, 0x54, 0xa4, 0x49, 0x14, 0xff, 0x99, 0xef, 0x9d, 0x24, + 0xac, 0x94, 0xc0, 0x07, 0x41, 0xae, 0x46, 0xfc, 0x28, 0x90, 0x51, 0x52, 0xd1, 0x7e, 0x9e, 0x11, + 0x91, 0xe0, 0xb1, 0xac, 0x6c, 0xb4, 0xb5, 0xa9, 0x2a, 0x2b, 0xe3, 0xe6, 0x34, 0xe6, 0xc3, 0x6f, + 0x1a, 0x20, 0xe7, 0xc9, 0xe0, 0xb0, 0x94, 0x7b, 0x75, 0x40, 0x79, 0xc1, 0xc3, 0x9b, 0xb8, 0x2b, + 0x22, 0x2f, 0x90, 0xe1, 0x93, 0x20, 0x47, 0xab, 0x7e, 0x80, 0x65, 0xd4, 0x67, 0x63, 0xa1, 0x0a, + 0x23, 0x9e, 0x34, 0x0b, 0x67, 0x63, 0x73, 0x9c, 0x80, 0x84, 0x30, 0xfc, 0x8e, 0x01, 0x40, 0xc3, + 0x72, 0x1d, 0xdb, 0xe2, 0x2d, 0x43, 0x8e, 0xbb, 0xdf, 0xdf, 0xb4, 0x7e, 0x49, 0x99, 0x17, 0x8b, + 0x96, 0xfc, 0x47, 0x1a, 0x34, 0x7c, 0xd7, 0x00, 0xe3, 0x34, 0xda, 0x25, 0x52, 0x8b, 
0xf2, 0xe6, + 0x62, 0x6c, 0xe9, 0xcb, 0x7d, 0xf5, 0xa5, 0xa2, 0x01, 0x94, 0x27, 0x5a, 0xcd, 0xc2, 0xb8, 0x4e, + 0x41, 0x6d, 0x0e, 0xc0, 0xef, 0x1b, 0x20, 0xdf, 0x88, 0xcf, 0xec, 0x11, 0xbe, 0xe1, 0x5f, 0x1f, + 0xd0, 0xc2, 0xca, 0x8c, 0x4a, 0x76, 0x81, 0xea, 0x03, 0x94, 0x07, 0xf0, 0x8f, 0x06, 0x30, 0x2d, + 0x5b, 0x14, 0x78, 0xcb, 0xdd, 0x26, 0x8e, 0x17, 0x62, 0x22, 0xfa, 0x4d, 0x6a, 0xe6, 0xb9, 0x7b, + 0xfd, 0x3d, 0x0b, 0xd3, 0xbd, 0x6c, 0x79, 0x4e, 0x7a, 0x67, 0x2e, 0xf7, 0x70, 0x03, 0xf5, 0x74, + 0x90, 0x27, 0x5a, 0xd2, 0xd2, 0x98, 0xa3, 0x03, 0x48, 0xb4, 0xa4, 0x97, 0x92, 0xd5, 0x21, 0xe9, + 0xa0, 0x34, 0x68, 0xb8, 0x05, 0xa6, 0x02, 0x82, 0x39, 0xc0, 0x4d, 0xef, 0xc0, 0xf3, 0x0f, 0xbd, + 0x6b, 0x0e, 0x76, 0x6d, 0x6a, 0x82, 0x39, 0x63, 0x21, 0x5f, 0xbe, 0xd4, 0x6a, 0x16, 0xa6, 0xb6, + 0xbb, 0x09, 0xa0, 0xee, 0x7a, 0xc5, 0x77, 0xb3, 0xe9, 0x5b, 0x40, 0xba, 0x8b, 0x80, 0xef, 0x8b, + 0xa7, 0x17, 0xb1, 0xa1, 0xa6, 0xc1, 0x57, 0xeb, 0xcd, 0x01, 0x25, 0x93, 0x6a, 0x03, 0x92, 0x4e, + 0x4e, 0x91, 0x28, 0xd2, 0xfc, 0x80, 0x3f, 0x31, 0xc0, 0x59, 0xab, 0x5a, 0xc5, 0x41, 0x88, 0x6d, + 0x51, 0xdc, 0x33, 0x9f, 0x41, 0xfd, 0x9a, 0x92, 0x5e, 0x9d, 0x5d, 0xd6, 0xa1, 0x51, 0xbb, 0x27, + 0xf0, 0x59, 0x70, 0x8e, 0x86, 0x3e, 0xc1, 0x76, 0xaa, 0x6d, 0x86, 0xad, 0x66, 0xe1, 0x5c, 0xa5, + 0x8d, 0x83, 0x52, 0x92, 0xc5, 0xbf, 0xe5, 0x40, 0xe1, 0x0e, 0x5b, 0xed, 0x2e, 0x2e, 0x66, 0x57, + 0xc0, 0x30, 0x7f, 0x5c, 0x9b, 0x47, 0x25, 0xaf, 0xb5, 0x82, 0x9c, 0x8a, 0x24, 0x97, 0x1d, 0x14, + 0x0c, 0x9f, 0xb5, 0x2f, 0x59, 0x2e, 0xa8, 0x0e, 0x8a, 0x8a, 0x20, 0xa3, 0x98, 0x0f, 0x97, 0x00, + 0xb0, 0x71, 0x40, 0x30, 0x3b, 0xac, 0x6c, 0x73, 0x84, 0x4b, 0xab, 0x45, 0x5a, 0x55, 0x1c, 0xa4, + 0x49, 0xc1, 0x6b, 0x00, 0xc6, 0xff, 0x1c, 0xdf, 0x7b, 0xd9, 0x22, 0x9e, 0xe3, 0xd5, 0xcc, 0x3c, + 0x77, 0x7b, 0x9a, 0x75, 0x63, 0xab, 0x1d, 0x5c, 0xd4, 0x45, 0x03, 0xbe, 0x0d, 0x86, 0xc5, 0xd0, + 0x87, 0x9f, 0x10, 0x03, 0xac, 0xf2, 0x80, 0xc7, 0x88, 0x43, 0x21, 0x09, 0xd9, 0x59, 0xdd, 0x73, + 0xf7, 0xba, 0xba, 0xdf, 0xb6, 0x9c, 0x0e, 0xff, 0x97, 0x97, 0xd3, 0xe2, 0xbf, 0x8d, 0x74, 0xcd, + 0xd1, 0x1e, 0xb5, 0x52, 0xb5, 0x5c, 0x0c, 0x57, 0xc1, 0x04, 0xbb, 0x31, 0x21, 0x1c, 0xb8, 0x4e, + 0xd5, 0xa2, 0xfc, 0xc2, 0x2e, 0x92, 0x5d, 0xcd, 0x90, 0x2a, 0x29, 0x3e, 0xea, 0xd0, 0x80, 0x2f, + 0x00, 0x28, 0x6e, 0x11, 0x6d, 0x76, 0x44, 0x43, 0xa4, 0xee, 0x03, 0x95, 0x0e, 0x09, 0xd4, 0x45, + 0x0b, 0xae, 0x80, 0x49, 0xd7, 0xda, 0xc5, 0x6e, 0x05, 0xbb, 0xb8, 0x1a, 0xfa, 0x84, 0x9b, 0x12, + 0x23, 0x8d, 0xa9, 0x56, 0xb3, 0x30, 0x79, 0x3d, 0xcd, 0x44, 0x9d, 0xf2, 0xc5, 0xf9, 0xf4, 0xd6, + 0xd6, 0x1f, 0x5c, 0xdc, 0xcd, 0x3e, 0xc8, 0x80, 0x99, 0xde, 0x99, 0x01, 0xbf, 0x95, 0x5c, 0x21, + 0xc5, 0x0d, 0xe1, 0xf5, 0x41, 0x65, 0xa1, 0xbc, 0x43, 0x82, 0xce, 0xfb, 0x23, 0xfc, 0x3a, 0x6b, + 0xd7, 0x2c, 0x37, 0x1e, 0x5a, 0xbd, 0x36, 0x30, 0x17, 0x18, 0x48, 0x79, 0x54, 0x74, 0x82, 0x96, + 0xcb, 0x1b, 0x3f, 0xcb, 0xc5, 0xc5, 0xdf, 0x18, 0xe9, 0x29, 0x42, 0xb2, 0x83, 0xe1, 0x0f, 0x0c, + 0x70, 0xde, 0x0f, 0xb0, 0xb7, 0xbc, 0xbd, 0xfe, 0xd2, 0xff, 0x89, 0x9d, 0x2c, 0x43, 0xb5, 0x79, + 0x4a, 0x3f, 0x5f, 0xa8, 0x6c, 0x6d, 0x0a, 0x83, 0xdb, 0xc4, 0x0f, 0x68, 0xf9, 0x42, 0xab, 0x59, + 0x38, 0xbf, 0xd5, 0x0e, 0x85, 0xd2, 0xd8, 0xc5, 0x3a, 0x98, 0x5a, 0x3b, 0x0a, 0x31, 0xf1, 0x2c, + 0x77, 0xd5, 0xaf, 0x46, 0x75, 0xec, 0x85, 0xc2, 0xd1, 0xd4, 0xc4, 0xcb, 0xb8, 0xcb, 0x89, 0xd7, + 0x03, 0x20, 0x1b, 0x11, 0x57, 0x66, 0xf1, 0x98, 0x9a, 0xe8, 0xa2, 0xeb, 0x88, 0xd1, 0x8b, 0xf3, + 0x60, 0x88, 0xf9, 0x09, 0x2f, 0x81, 0x2c, 0xb1, 0x0e, 0xb9, 0xd5, 0xf1, 0xf2, 0x08, 0x13, 0x41, + 0xd6, 0x21, 
0x62, 0xb4, 0xe2, 0x5f, 0xe6, 0xc1, 0xf9, 0xd4, 0xb3, 0xc0, 0x19, 0x90, 0x51, 0x63, + 0x62, 0x20, 0x8d, 0x66, 0xd6, 0x57, 0x51, 0xc6, 0xb1, 0xe1, 0xd3, 0xaa, 0xf8, 0x0a, 0xd0, 0x82, + 0x3a, 0x4b, 0x38, 0x95, 0xf5, 0xe7, 0x89, 0x39, 0xe6, 0x48, 0x5c, 0x38, 0x99, 0x0f, 0x78, 0x4f, + 0xee, 0x12, 0xe1, 0x03, 0xde, 0x43, 0x8c, 0xf6, 0x69, 0xc7, 0x7d, 0xf1, 0xbc, 0x31, 0x77, 0x17, + 0xf3, 0xc6, 0xe1, 0xdb, 0xce, 0x1b, 0x1f, 0x04, 0xb9, 0xd0, 0x09, 0x5d, 0xcc, 0x0f, 0x32, 0xed, + 0x1a, 0x75, 0x83, 0x11, 0x91, 0xe0, 0xc1, 0xb7, 0xc0, 0x88, 0x8d, 0xf7, 0xac, 0xc8, 0x0d, 0xf9, + 0x99, 0x35, 0xb6, 0xb4, 0xd2, 0x87, 0x14, 0x12, 0xc3, 0xe0, 0x55, 0x61, 0x17, 0xc5, 0x00, 0xf0, + 0x21, 0x30, 0x52, 0xb7, 0x8e, 0x9c, 0x7a, 0x54, 0xe7, 0x0d, 0xa6, 0x21, 0xc4, 0x36, 0x04, 0x09, + 0xc5, 0x3c, 0x56, 0x19, 0xf1, 0x51, 0xd5, 0x8d, 0xa8, 0xd3, 0xc0, 0x92, 0x29, 0x9b, 0x3f, 0x55, + 0x19, 0xd7, 0x52, 0x7c, 0xd4, 0xa1, 0xc1, 0xc1, 0x1c, 0x8f, 0x2b, 0x8f, 0x69, 0x60, 0x82, 0x84, + 0x62, 0x5e, 0x3b, 0x98, 0x94, 0x1f, 0xef, 0x05, 0x26, 0x95, 0x3b, 0x34, 0xe0, 0xa3, 0x60, 0xb4, + 0x6e, 0x1d, 0x5d, 0xc7, 0x5e, 0x2d, 0xdc, 0x37, 0xcf, 0xce, 0x19, 0x0b, 0xd9, 0xf2, 0xd9, 0x56, + 0xb3, 0x30, 0xba, 0x11, 0x13, 0x51, 0xc2, 0xe7, 0xc2, 0x8e, 0x27, 0x85, 0xcf, 0x69, 0xc2, 0x31, + 0x11, 0x25, 0x7c, 0xd6, 0xbd, 0x04, 0x56, 0xc8, 0x36, 0x97, 0x79, 0xbe, 0xfd, 0x9a, 0xbb, 0x2d, + 0xc8, 0x28, 0xe6, 0xc3, 0x05, 0x90, 0xaf, 0x5b, 0x47, 0x7c, 0x24, 0x61, 0x4e, 0x70, 0xb3, 0x7c, + 0x30, 0xbe, 0x21, 0x69, 0x48, 0x71, 0xb9, 0xa4, 0xe3, 0x09, 0xc9, 0x49, 0x4d, 0x52, 0xd2, 0x90, + 0xe2, 0xb2, 0x24, 0x8e, 0x3c, 0xe7, 0x56, 0x84, 0x85, 0x30, 0xe4, 0x91, 0x51, 0x49, 0x7c, 0x33, + 0x61, 0x21, 0x5d, 0x0e, 0x96, 0x00, 0xa8, 0x47, 0x6e, 0xe8, 0x04, 0x2e, 0xde, 0xda, 0x33, 0x2f, + 0xf0, 0xf8, 0xf3, 0xa6, 0x7f, 0x43, 0x51, 0x91, 0x26, 0x01, 0x31, 0x18, 0xc2, 0x5e, 0x54, 0x37, + 0x2f, 0xf2, 0x83, 0xbd, 0x2f, 0x29, 0xa8, 0x76, 0xce, 0x9a, 0x17, 0xd5, 0x11, 0x37, 0x0f, 0x9f, + 0x06, 0x67, 0xeb, 0xd6, 0x11, 0x2b, 0x07, 0x98, 0x84, 0x0e, 0xa6, 0xe6, 0x14, 0x7f, 0xf8, 0x49, + 0xd6, 0xed, 0x6e, 0xe8, 0x0c, 0xd4, 0x2e, 0xc7, 0x15, 0x1d, 0x4f, 0x53, 0x9c, 0xd6, 0x14, 0x75, + 0x06, 0x6a, 0x97, 0x63, 0x91, 0x26, 0xf8, 0x56, 0xe4, 0x10, 0x6c, 0x9b, 0xf7, 0xf1, 0x06, 0x59, + 0xbe, 0xac, 0x10, 0x34, 0xa4, 0xb8, 0xb0, 0x11, 0xcf, 0xae, 0x4c, 0xbe, 0x0d, 0x6f, 0xf6, 0xb7, + 0x92, 0x6f, 0x91, 0x65, 0x42, 0xac, 0x63, 0x71, 0xd2, 0xe8, 0x53, 0x2b, 0x48, 0x41, 0xce, 0x72, + 0xdd, 0xad, 0x3d, 0xf3, 0x12, 0x8f, 0x7d, 0xbf, 0x4f, 0x10, 0x55, 0x75, 0x96, 0x19, 0x08, 0x12, + 0x58, 0x0c, 0xd4, 0xf7, 0x58, 0x6a, 0xcc, 0x0c, 0x16, 0x74, 0x8b, 0x81, 0x20, 0x81, 0xc5, 0x9f, + 0xd4, 0x3b, 0xde, 0xda, 0x33, 0xef, 0x1f, 0xf0, 0x93, 0x32, 0x10, 0x24, 0xb0, 0xa0, 0x03, 0xb2, + 0x9e, 0x1f, 0x9a, 0x97, 0x07, 0x72, 0x3c, 0xf3, 0x03, 0x67, 0xd3, 0x0f, 0x11, 0xc3, 0x80, 0x3f, + 0x36, 0x00, 0x08, 0x92, 0x14, 0x7d, 0xa0, 0x2f, 0x23, 0x91, 0x14, 0x64, 0x29, 0xc9, 0xed, 0x35, + 0x2f, 0x24, 0xc7, 0xc9, 0xf5, 0x48, 0xdb, 0x03, 0x9a, 0x17, 0xf0, 0x17, 0x06, 0xb8, 0xa8, 0xb7, + 0xc9, 0xca, 0xbd, 0x59, 0x1e, 0x91, 0x1b, 0xfd, 0x4e, 0xf3, 0xb2, 0xef, 0xbb, 0x65, 0xb3, 0xd5, + 0x2c, 0x5c, 0x5c, 0xee, 0x82, 0x8a, 0xba, 0xfa, 0x02, 0x7f, 0x6b, 0x80, 0x49, 0x59, 0x45, 0x35, + 0x0f, 0x0b, 0x3c, 0x80, 0xb8, 0xdf, 0x01, 0x4c, 0xe3, 0x88, 0x38, 0xaa, 0x97, 0xec, 0x1d, 0x7c, + 0xd4, 0xe9, 0x1a, 0xfc, 0x83, 0x01, 0xc6, 0x6d, 0x1c, 0x60, 0xcf, 0xc6, 0x5e, 0x95, 0xf9, 0x3a, + 0xd7, 0x97, 0x91, 0x45, 0xda, 0xd7, 0x55, 0x0d, 0x42, 0xb8, 0x59, 0x92, 0x6e, 0x8e, 0xeb, 0xac, + 0x93, 0x66, 0x61, 0x3a, 0x51, 0xd5, 
0x39, 0xa8, 0xcd, 0x4b, 0xf8, 0x9e, 0x01, 0xce, 0x27, 0x0b, + 0x20, 0x8e, 0x94, 0xf9, 0x01, 0xe6, 0x01, 0x6f, 0x5f, 0x97, 0xdb, 0x01, 0x51, 0xda, 0x03, 0xf8, + 0x3b, 0x83, 0x75, 0x6a, 0xf1, 0xbd, 0x8f, 0x9a, 0x45, 0x1e, 0xcb, 0x37, 0xfa, 0x1e, 0x4b, 0x85, + 0x20, 0x42, 0x79, 0x35, 0x69, 0x05, 0x15, 0xe7, 0xa4, 0x59, 0x98, 0xd2, 0x23, 0xa9, 0x18, 0x48, + 0xf7, 0x10, 0x7e, 0xcf, 0x00, 0xe3, 0x38, 0xe9, 0xb8, 0xa9, 0xf9, 0x60, 0x5f, 0x82, 0xd8, 0xb5, + 0x89, 0x17, 0x37, 0x75, 0x8d, 0x45, 0x51, 0x1b, 0x36, 0xeb, 0x20, 0xf1, 0x91, 0x55, 0x0f, 0x5c, + 0x6c, 0xfe, 0x4f, 0x9f, 0x3b, 0xc8, 0x35, 0x61, 0x17, 0xc5, 0x00, 0xf0, 0x2a, 0xc8, 0x7b, 0x91, + 0xeb, 0x5a, 0xbb, 0x2e, 0x36, 0x1f, 0xe2, 0xbd, 0x88, 0x1a, 0xc9, 0x6e, 0x4a, 0x3a, 0x52, 0x12, + 0x70, 0x0f, 0xcc, 0x1d, 0xbd, 0xa8, 0x3e, 0x4f, 0xea, 0x3a, 0x34, 0x34, 0xaf, 0x70, 0x2b, 0x33, + 0xad, 0x66, 0x61, 0x7a, 0xa7, 0xfb, 0x58, 0xf1, 0x8e, 0x36, 0xe0, 0x2b, 0xe0, 0x7e, 0x4d, 0x66, + 0xad, 0xbe, 0x8b, 0x6d, 0x1b, 0xdb, 0xf1, 0xc5, 0xcd, 0xfc, 0x5f, 0x31, 0xb8, 0x8c, 0x37, 0xf8, + 0x4e, 0x5a, 0x00, 0xdd, 0x4e, 0x1b, 0x5e, 0x07, 0xd3, 0x1a, 0x7b, 0xdd, 0x0b, 0xb7, 0x48, 0x25, + 0x24, 0x8e, 0x57, 0x33, 0x17, 0xb8, 0xdd, 0x8b, 0xf1, 0x8e, 0xdc, 0xd1, 0x78, 0xa8, 0x87, 0x0e, + 0xfc, 0x62, 0x9b, 0x35, 0xfe, 0x0a, 0xcd, 0x0a, 0x5e, 0xc4, 0xc7, 0xd4, 0x7c, 0x98, 0x77, 0x27, + 0x7c, 0xb1, 0x77, 0x34, 0x3a, 0xea, 0x21, 0x0f, 0xbf, 0x00, 0x2e, 0xa4, 0x38, 0xec, 0x8a, 0x62, + 0x3e, 0x22, 0xee, 0x1a, 0xac, 0x9f, 0xdd, 0x89, 0x89, 0xa8, 0x9b, 0x24, 0xfc, 0x3c, 0x80, 0x1a, + 0x79, 0xc3, 0x0a, 0xb8, 0xfe, 0xa3, 0xe2, 0xda, 0xc3, 0x56, 0x74, 0x47, 0xd2, 0x50, 0x17, 0x39, + 0xf8, 0x53, 0xa3, 0xed, 0x49, 0x92, 0xdb, 0x31, 0x35, 0xaf, 0xf2, 0xfd, 0xbb, 0x71, 0xca, 0x2c, + 0xd4, 0xde, 0x83, 0x44, 0x2e, 0xd6, 0xc2, 0xac, 0x41, 0xa1, 0x1e, 0x2e, 0xcc, 0xb0, 0x1b, 0x7a, + 0xaa, 0xc2, 0xc3, 0x09, 0x90, 0x3d, 0xc0, 0xf2, 0xab, 0x0a, 0xc4, 0x7e, 0x42, 0x1b, 0xe4, 0x1a, + 0x96, 0x1b, 0xc5, 0x43, 0x86, 0x3e, 0x77, 0x07, 0x48, 0x18, 0x7f, 0x36, 0xf3, 0x8c, 0x31, 0xf3, + 0xbe, 0x01, 0xa6, 0xbb, 0x1f, 0x3c, 0xf7, 0xd4, 0xad, 0x9f, 0x19, 0x60, 0xb2, 0xe3, 0x8c, 0xe9, + 0xe2, 0xd1, 0xad, 0x76, 0x8f, 0x5e, 0xe9, 0xf7, 0x61, 0x21, 0x36, 0x07, 0xef, 0x90, 0x75, 0xf7, + 0x7e, 0x68, 0x80, 0x89, 0x74, 0xd9, 0xbe, 0x97, 0xf1, 0x2a, 0xbe, 0x9f, 0x01, 0xd3, 0xdd, 0x1b, + 0x7b, 0x48, 0xd4, 0x04, 0x63, 0x30, 0x93, 0xa0, 0x6e, 0x53, 0xe3, 0x77, 0x0c, 0x30, 0xf6, 0x96, + 0x92, 0x8b, 0xdf, 0xba, 0xf7, 0x7d, 0x06, 0x15, 0x9f, 0x93, 0x09, 0x83, 0x22, 0x1d, 0xb7, 0xf8, + 0x7b, 0x03, 0x4c, 0x75, 0x6d, 0x00, 0xe0, 0x15, 0x30, 0x6c, 0xb9, 0xae, 0x7f, 0x28, 0x46, 0x89, + 0xda, 0x3b, 0x82, 0x65, 0x4e, 0x45, 0x92, 0xab, 0x45, 0x2f, 0xf3, 0x59, 0x45, 0xaf, 0xf8, 0x27, + 0x03, 0x5c, 0xbe, 0x5d, 0x26, 0xde, 0x93, 0x25, 0x5d, 0x00, 0x79, 0xd9, 0xbc, 0x1f, 0xf3, 0xe5, + 0x94, 0xa5, 0x58, 0x16, 0x0d, 0xfe, 0xa1, 0x99, 0xf8, 0x55, 0xfc, 0xc0, 0x00, 0x13, 0x15, 0x4c, + 0x1a, 0x4e, 0x15, 0x23, 0xbc, 0x87, 0x09, 0xf6, 0xaa, 0x18, 0x2e, 0x82, 0x51, 0xfe, 0xba, 0x3b, + 0xb0, 0xaa, 0xf1, 0xab, 0x9b, 0x49, 0x19, 0xf2, 0xd1, 0xcd, 0x98, 0x81, 0x12, 0x19, 0xf5, 0x9a, + 0x27, 0xd3, 0xf3, 0x35, 0xcf, 0x65, 0x30, 0x14, 0x24, 0x83, 0xe8, 0x3c, 0xe3, 0xf2, 0xd9, 0x33, + 0xa7, 0x72, 0xae, 0x4f, 0x42, 0x3e, 0x5d, 0xcb, 0x49, 0xae, 0x4f, 0x42, 0xc4, 0xa9, 0xc5, 0x5f, + 0x1b, 0xe0, 0x5c, 0x7b, 0x1d, 0x67, 0x80, 0x24, 0x72, 0x3b, 0xde, 0x2b, 0x31, 0x1e, 0xe2, 0x1c, + 0xfd, 0x73, 0x97, 0xcc, 0xed, 0x3f, 0x77, 0x81, 0xcf, 0x83, 0x49, 0xf9, 0x73, 0xed, 0x28, 0x20, + 0x98, 0xf2, 0x77, 0xa7, 0xd9, 0xf6, 0x8f, 0x66, 0x37, 0xd2, 
0x02, 0xa8, 0x53, 0xa7, 0xf8, 0x67, + 0x03, 0x74, 0xfb, 0x76, 0x0d, 0x5e, 0x12, 0x93, 0x50, 0x6d, 0xbc, 0x18, 0x4f, 0x41, 0x61, 0x03, + 0x8c, 0x50, 0x11, 0x7e, 0x99, 0x1e, 0x5b, 0xa7, 0x4c, 0x8f, 0xf4, 0x62, 0x8a, 0x16, 0x2c, 0xa6, + 0xc6, 0x60, 0x2c, 0x43, 0xaa, 0x56, 0x39, 0xf2, 0x6c, 0x39, 0x1c, 0x1f, 0x17, 0x19, 0xb2, 0xb2, + 0x2c, 0x68, 0x48, 0x71, 0xcb, 0xd5, 0x0f, 0x3f, 0x99, 0x3d, 0xf3, 0xd1, 0x27, 0xb3, 0x67, 0x3e, + 0xfe, 0x64, 0xf6, 0xcc, 0x37, 0x5a, 0xb3, 0xc6, 0x87, 0xad, 0x59, 0xe3, 0xa3, 0xd6, 0xac, 0xf1, + 0x71, 0x6b, 0xd6, 0xf8, 0x7b, 0x6b, 0xd6, 0xf8, 0xd1, 0x3f, 0x66, 0xcf, 0x7c, 0xe5, 0xb9, 0x53, + 0x7d, 0x2e, 0xfe, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x11, 0x77, 0x12, 0x7e, 0x87, 0x2e, 0x00, + 0x00, } func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { @@ -2656,6 +2658,11 @@ func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x1a i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -3345,6 +3352,8 @@ func (m *ValidationRule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3811,6 +3820,7 @@ func (this *ValidationRule) String() string { s := strings.Join([]string{`&ValidationRule{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, `}`, }, "") return s @@ -9037,6 +9047,38 @@ func (m *ValidationRule) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 3f8ba6c7bf..feb9fd9d1c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -717,6 +717,19 @@ message ValidationRule { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" optional string message = 2; + + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. 
+ // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + optional string messageExpression = 3; } // WebhookClientConfig contains the information to make a TLS connection with the webhook. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index c9d943c9a8..ab0169f93e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -235,12 +235,24 @@ type ValidationRule struct { // If unset, the message is "failed rule: {Rule}". // e.g. "must be a URL with the host matching spec.host" Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a rule, then messageExpression will be used if validation + // fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the rule; the only difference is the return type. + // Example: + // "x must be less than max ("+string(self.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,3,opt,name=messageExpression"` } // JSON represents any valid JSON value. // These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. 
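Illustrative sketch (not part of the vendored change): the new `messageExpression` field sits beside `rule` and `message` on a validation rule. The snippet below uses only the fields and JSON tags shown in this diff; the example CEL expressions (`self.replicas`, `self.maxReplicas`) are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"

	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	// messageExpression must evaluate to a string. If it errors at runtime, or
	// yields an empty, whitespace-only, or multi-line string, the server falls
	// back to `message` (or to "failed rule: {Rule}") as documented above.
	rule := apiextensionsv1beta1.ValidationRule{
		Rule:              "self.replicas <= self.maxReplicas",
		Message:           "replicas must not exceed maxReplicas",
		MessageExpression: `"replicas must be at most " + string(self.maxReplicas)`,
	}

	out, err := json.Marshal(rule)
	if err != nil {
		panic(err)
	}
	// Wire names come from the struct tags added in this change:
	// {"rule":"...","message":"...","messageExpression":"..."}
	fmt.Println(string(out))
}
```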
type JSON struct { - Raw []byte `protobuf:"bytes,1,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index 54cae3cfdf..ef5bc5e330 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -1306,6 +1306,7 @@ func Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apie func autoConvert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { out.Rule = in.Rule out.Message = in.Message + out.MessageExpression = in.MessageExpression return nil } @@ -1317,6 +1318,7 @@ func Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *Validati func autoConvert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { out.Rule = in.Rule out.Message = in.Message + out.MessageExpression = in.MessageExpression return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 899d3e8a66..1bf6b06d47 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -40,8 +40,7 @@ var ( // IsListType returns true if the provided Object has a slice called Items. // TODO: Replace the code in this check with an interface comparison by -// -// creating and enforcing that lists implement a list accessor. +// creating and enforcing that lists implement a list accessor. func IsListType(obj runtime.Object) bool { switch t := obj.(type) { case runtime.Unstructured: diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go new file mode 100644 index 0000000000..29c6a48b6a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go @@ -0,0 +1,38 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// SetListOptionsDefaults sets defaults on the provided ListOptions if applicable. +// +// TODO(#115478): once the watch-list fg is always on we register this function in the scheme (via AddTypeDefaultingFunc). +// TODO(#115478): when the function is registered in the scheme remove all callers of this method. 
+func SetListOptionsDefaults(obj *ListOptions, isWatchListFeatureEnabled bool) { + if !isWatchListFeatureEnabled { + return + } + if obj.SendInitialEvents != nil || len(obj.ResourceVersionMatch) != 0 { + return + } + legacy := obj.ResourceVersion == "" || obj.ResourceVersion == "0" + if obj.Watch && legacy { + turnOnInitialEvents := true + obj.SendInitialEvents = &turnOnInitialEvents + obj.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index a49b5f2bef..00d2b8c689 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -66,6 +66,31 @@ type ListOptions struct { // it does not recognize and will return a 410 error if the token can no longer be used because // it has expired. Continue string + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. 
+ SendInitialEvents *bool } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index 6d212b846a..a6552c276e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -115,6 +115,7 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } @@ -137,6 +138,7 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index 6e1eac5c75..af66a2ac4c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -75,6 +75,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 7e00eb7d96..1a641e7c12 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -1326,185 +1326,187 @@ func init() { } var fileDescriptor_cf52fa777ced5367 = []byte{ - // 2842 bytes of a gzipped FileDescriptorProto + // 2867 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47, 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44, 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d, 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a, 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88, - 0x0b, 0xdc, 0x41, 0x22, 0xc7, 0x20, 0x2e, 0x91, 0x40, 0xa3, 0xc4, 0x1c, 0x38, 0x22, 0xae, 0xbe, - 0x80, 0xea, 0xd1, 0xdd, 0xd5, 0xf3, 0x58, 0xf7, 0x64, 0x97, 0x88, 0xdb, 0xf4, 0xf7, 0xae, 0xaa, - 0xaf, 0xbe, 0x47, 0x7d, 0x03, 0x3b, 0xc7, 0xd7, 0x58, 0xdd, 0xf1, 0xd7, 0x8f, 0x7b, 0x07, 0x84, - 0x7a, 0x24, 0x20, 0x6c, 0xfd, 0x84, 0x78, 0xb6, 0x4f, 0xd7, 0x15, 0xc2, 0xea, 0x3a, 0x1d, 0xab, - 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58, 0xeb, - 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 0x52, 0x3f, 0xf0, 0xd1, 0x63, - 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9, 0xb2, - 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f, 0x5d, - 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 
0xd0, 0x95, 0x89, 0xa6, 0xd0, 0x9e, - 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9, 0x58, 0xc3, - 0x7c, 0xe6, 0x9f, 0xb3, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad, 0x41, 0xce, - 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc, 0xe9, 0xa0, - 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1, 0x3d, 0x56, - 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70, 0x57, 0xb2, - 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14, 0x14, 0x92, - 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56, 0xf8, 0x6a, - 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2, 0x11, 0xfd, - 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe, 0xa6, 0xeb, - 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83, 0xf6, 0x27, - 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10, 0xca, 0xe1, - 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c, 0xac, 0xa7, - 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3, 0x67, 0x39, - 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d, 0xca, 0xcc, - 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc, 0xaf, 0xe1, - 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59, 0x33, 0x2e, - 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b, 0x4b, 0xab, - 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7, 0xbc, 0xac, - 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec, 0x78, 0xb6, - 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09, 0xa1, 0x07, - 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37, 0x4d, 0xfc, - 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a, 0xf6, 0x72, - 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40, 0xda, 0x3e, - 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40, 0x2c, 0xf0, - 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 0x0a, 0x62, 0x75, 0x2b, 0x6a, 0x75, 0x68, - 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef, 0xae, 0x41, - 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11, 0x81, 0x22, - 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20, 0xc3, 0xb1, - 0x64, 0xf3, 0x9f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4, 0xf6, 0x95, - 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2, 0xbd, 0xda, - 0xcc, 0xdb, 0x7f, 0x5f, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb, 0xbb, 0xdd, - 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf, 0x9b, 0x02, - 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9, 0xe2, 0x40, - 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55, 0x97, 0x23, - 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d, 0xdb, 0x09, - 0xd4, 0xe5, 0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0, 
0x73, 0x30, - 0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05, 0xf4, 0x6c, - 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51, 0xf6, 0x4d, - 0x99, 0xf6, 0xc2, 0xfc, 0x91, 0x8d, 0x3d, 0x7d, 0x77, 0x84, 0x02, 0x8f, 0xe1, 0x42, 0x27, 0x80, - 0x5c, 0x8b, 0x05, 0xb7, 0xa9, 0xe5, 0x31, 0xa1, 0xeb, 0xb6, 0xd3, 0x21, 0xea, 0xc2, 0x7f, 0x29, - 0xdd, 0x89, 0x73, 0x8e, 0x58, 0xef, 0xad, 0x11, 0x69, 0x78, 0x8c, 0x06, 0xf4, 0x38, 0xcc, 0x52, - 0x62, 0x31, 0xdf, 0xab, 0xe6, 0xc5, 0xf2, 0xa3, 0xa8, 0x8c, 0x05, 0x14, 0x2b, 0x2c, 0x0f, 0x68, - 0x1d, 0xc2, 0x98, 0xd5, 0x0e, 0xc3, 0x6b, 0x14, 0xd0, 0x76, 0x24, 0x18, 0x87, 0x78, 0xf3, 0xb7, - 0x06, 0x54, 0x36, 0x29, 0xb1, 0x02, 0x32, 0x8d, 0x5b, 0x7c, 0xea, 0x13, 0x47, 0x1b, 0xb0, 0x20, - 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0xc8, 0x09, 0xe6, 0xcf, 0x2b, 0xe6, 0x85, 0xad, 0x24, - 0x1a, 0x0f, 0xd3, 0x9b, 0x3f, 0xc9, 0x42, 0xa5, 0x49, 0x5c, 0x12, 0x9b, 0xbc, 0x05, 0xa8, 0x4d, - 0xad, 0x16, 0xd9, 0x23, 0xd4, 0xf1, 0xed, 0x7d, 0xd2, 0xf2, 0x3d, 0x9b, 0x09, 0x37, 0xca, 0x36, - 0x3e, 0xc7, 0xf7, 0xf7, 0xe6, 0x08, 0x16, 0x8f, 0xe1, 0x40, 0x2e, 0x54, 0xba, 0x54, 0xfc, 0x16, - 0x7b, 0x2e, 0xbd, 0xac, 0x74, 0xf5, 0x2b, 0xe9, 0x8e, 0x74, 0x4f, 0x67, 0x6d, 0x2c, 0x9d, 0x0e, - 0x6a, 0x95, 0x04, 0x08, 0x27, 0x85, 0xa3, 0x6f, 0xc0, 0xa2, 0x4f, 0xbb, 0x47, 0x96, 0xd7, 0x24, - 0x5d, 0xe2, 0xd9, 0xc4, 0x0b, 0x98, 0xd8, 0xc8, 0x42, 0x63, 0x99, 0xd7, 0x22, 0xbb, 0x43, 0x38, - 0x3c, 0x42, 0x8d, 0x5e, 0x83, 0xa5, 0x2e, 0xf5, 0xbb, 0x56, 0x5b, 0x6c, 0xcc, 0x9e, 0xef, 0x3a, - 0xad, 0xbe, 0xda, 0xce, 0x27, 0x4f, 0x07, 0xb5, 0xa5, 0xbd, 0x61, 0xe4, 0xd9, 0xa0, 0x76, 0x41, - 0x6c, 0x1d, 0x87, 0xc4, 0x48, 0x3c, 0x2a, 0x46, 0x73, 0x83, 0xfc, 0x24, 0x37, 0x30, 0xb7, 0xa1, - 0xd0, 0xec, 0xa9, 0x3b, 0xf1, 0x02, 0x14, 0x6c, 0xf5, 0x5b, 0xed, 0x7c, 0x78, 0x39, 0x23, 0x9a, - 0xb3, 0x41, 0xad, 0xc2, 0xcb, 0xcf, 0x7a, 0x08, 0xc0, 0x11, 0x8b, 0xf9, 0x38, 0x14, 0xc4, 0xc1, - 0xb3, 0xbb, 0x57, 0xd0, 0x22, 0x64, 0xb1, 0x75, 0x4f, 0x48, 0x29, 0x63, 0xfe, 0x53, 0x8b, 0x62, - 0xbb, 0x00, 0x37, 0x49, 0x10, 0x1e, 0xfc, 0x06, 0x2c, 0x84, 0xa1, 0x3c, 0x99, 0x61, 0x22, 0x6f, - 0xc2, 0x49, 0x34, 0x1e, 0xa6, 0x37, 0x5f, 0x87, 0xa2, 0xc8, 0x42, 0x3c, 0x85, 0xc7, 0xe5, 0x82, - 0x71, 0x9f, 0x72, 0x21, 0xac, 0x01, 0x32, 0x93, 0x6a, 0x00, 0xcd, 0x5c, 0x17, 0x2a, 0x92, 0x37, - 0x2c, 0x90, 0x52, 0x69, 0x78, 0x12, 0x0a, 0xa1, 0x99, 0x4a, 0x4b, 0x54, 0x18, 0x87, 0x82, 0x70, - 0x44, 0xa1, 0x69, 0x3b, 0x82, 0x44, 0x46, 0x4d, 0xa7, 0x4c, 0xab, 0x7e, 0x32, 0xf7, 0xaf, 0x7e, - 0x34, 0x4d, 0x3f, 0x84, 0xea, 0xa4, 0x6a, 0xfa, 0x01, 0x72, 0x7e, 0x7a, 0x53, 0xcc, 0x77, 0x0c, - 0x58, 0xd4, 0x25, 0xa5, 0x3f, 0xbe, 0xf4, 0x4a, 0xce, 0xaf, 0xf6, 0xb4, 0x1d, 0xf9, 0x95, 0x01, - 0xcb, 0x89, 0xa5, 0x4d, 0x75, 0xe2, 0x53, 0x18, 0xa5, 0x3b, 0x47, 0x76, 0x0a, 0xe7, 0xf8, 0x6b, - 0x06, 0x2a, 0xb7, 0xac, 0x03, 0xe2, 0xee, 0x13, 0x97, 0xb4, 0x02, 0x9f, 0xa2, 0x1f, 0x40, 0xa9, - 0x63, 0x05, 0xad, 0x23, 0x01, 0x0d, 0x3b, 0x83, 0x66, 0xba, 0x60, 0x97, 0x90, 0x54, 0xdf, 0x89, - 0xc5, 0xdc, 0xf0, 0x02, 0xda, 0x6f, 0x5c, 0x50, 0x26, 0x95, 0x34, 0x0c, 0xd6, 0xb5, 0x89, 0x76, - 0x4e, 0x7c, 0xdf, 0x78, 0xab, 0xcb, 0xcb, 0x96, 0xe9, 0xbb, 0xc8, 0x84, 0x09, 0x98, 0xbc, 0xd9, - 0x73, 0x28, 0xe9, 0x10, 0x2f, 0x88, 0xdb, 0xb9, 0x9d, 0x21, 0xf9, 0x78, 0x44, 0xe3, 0xca, 0x8b, - 0xb0, 0x38, 0x6c, 0x3c, 0x8f, 0x3f, 0xc7, 0xa4, 0x2f, 0xcf, 0x0b, 0xf3, 0x9f, 0x68, 0x19, 0xf2, - 0x27, 0x96, 0xdb, 0x53, 0xb7, 0x11, 0xcb, 0x8f, 0xeb, 0x99, 0x6b, 0x86, 0xf9, 0x1b, 0x03, 0xaa, - 0x93, 0x0c, 
0x41, 0x5f, 0xd4, 0x04, 0x35, 0x4a, 0xca, 0xaa, 0xec, 0x2b, 0xa4, 0x2f, 0xa5, 0xde, - 0x80, 0x82, 0xdf, 0xe5, 0x35, 0x85, 0x4f, 0xd5, 0xa9, 0x3f, 0x11, 0x9e, 0xe4, 0xae, 0x82, 0x9f, - 0x0d, 0x6a, 0x17, 0x13, 0xe2, 0x43, 0x04, 0x8e, 0x58, 0x79, 0xa4, 0x16, 0xf6, 0xf0, 0xec, 0x11, - 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0x83, 0x01, 0x39, 0x51, 0x90, 0xbf, 0x0e, 0x05, - 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xad, 0x20, 0xe7, 0xde, 0x21, 0x81, 0x15, 0x7b, - 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4, 0x53, 0x13, 0x45, - 0xab, 0x87, 0x88, 0x3a, 0xb6, 0xee, 0xdd, 0x78, 0x2b, 0x20, 0x1e, 0x3f, 0x8c, 0xf8, 0x6a, 0x6c, - 0x73, 0x19, 0x58, 0x8a, 0x32, 0xff, 0x6d, 0x40, 0xa4, 0x8a, 0x3b, 0x3f, 0x23, 0xee, 0xe1, 0x2d, - 0xc7, 0x3b, 0x56, 0xdb, 0x1a, 0x99, 0xb3, 0xaf, 0xe0, 0x38, 0xa2, 0x18, 0x97, 0x1e, 0x32, 0xd3, - 0xa5, 0x07, 0xae, 0xb0, 0xe5, 0x7b, 0x81, 0xe3, 0xf5, 0x46, 0x6e, 0xdb, 0xa6, 0x82, 0xe3, 0x88, - 0x82, 0x17, 0x22, 0x94, 0x74, 0x2c, 0xc7, 0x73, 0xbc, 0x36, 0x5f, 0xc4, 0xa6, 0xdf, 0xf3, 0x02, - 0x91, 0x91, 0x55, 0x21, 0x82, 0x47, 0xb0, 0x78, 0x0c, 0x87, 0xf9, 0xfb, 0x1c, 0x94, 0xf8, 0x9a, - 0xc3, 0x3c, 0xf7, 0x3c, 0x54, 0x5c, 0xdd, 0x0b, 0xd4, 0xda, 0x2f, 0x2a, 0x53, 0x92, 0xf7, 0x1a, - 0x27, 0x69, 0x39, 0xb3, 0x28, 0xa1, 0x22, 0xe6, 0x4c, 0x92, 0x79, 0x4b, 0x47, 0xe2, 0x24, 0x2d, - 0x8f, 0x5e, 0xf7, 0xf8, 0xfd, 0x50, 0x95, 0x49, 0x74, 0x44, 0xdf, 0xe4, 0x40, 0x2c, 0x71, 0x68, - 0x07, 0x2e, 0x58, 0xae, 0xeb, 0xdf, 0x13, 0xc0, 0x86, 0xef, 0x1f, 0x77, 0x2c, 0x7a, 0xcc, 0x44, - 0x33, 0x5d, 0x68, 0x7c, 0x41, 0xb1, 0x5c, 0xd8, 0x18, 0x25, 0xc1, 0xe3, 0xf8, 0xc6, 0x1d, 0x5b, - 0x6e, 0xca, 0x63, 0x3b, 0x82, 0xe5, 0x21, 0x90, 0xb8, 0xe5, 0xaa, 0xb3, 0x7d, 0x46, 0xc9, 0x59, - 0xc6, 0x63, 0x68, 0xce, 0x26, 0xc0, 0xf1, 0x58, 0x89, 0xe8, 0x3a, 0xcc, 0x73, 0x4f, 0xf6, 0x7b, - 0x41, 0x58, 0x77, 0xe6, 0xc5, 0x71, 0xa3, 0xd3, 0x41, 0x6d, 0xfe, 0x76, 0x02, 0x83, 0x87, 0x28, - 0xf9, 0xe6, 0xba, 0x4e, 0xc7, 0x09, 0xaa, 0x73, 0x82, 0x25, 0xda, 0xdc, 0x5b, 0x1c, 0x88, 0x25, - 0x2e, 0xe1, 0x81, 0x85, 0xf3, 0x3c, 0xd0, 0xfc, 0x4b, 0x16, 0x90, 0xac, 0xb5, 0x6d, 0x59, 0x4f, - 0xc9, 0x90, 0xc6, 0x3b, 0x02, 0x55, 0xab, 0x1b, 0x43, 0x1d, 0x81, 0x2a, 0xd3, 0x43, 0x3c, 0xda, - 0x81, 0xa2, 0x0c, 0x2d, 0xf1, 0x75, 0x59, 0x57, 0xc4, 0xc5, 0xdd, 0x10, 0x71, 0x36, 0xa8, 0xad, - 0x24, 0xd4, 0x44, 0x18, 0xd1, 0xad, 0xc5, 0x12, 0xd0, 0x55, 0x00, 0xab, 0xeb, 0xe8, 0xef, 0x75, - 0xc5, 0xf8, 0xd5, 0x26, 0xee, 0xbc, 0xb1, 0x46, 0x85, 0x5e, 0x82, 0x5c, 0xf0, 0xe9, 0x3a, 0xaa, - 0x82, 0x68, 0x18, 0x79, 0xff, 0x24, 0x24, 0x70, 0xed, 0xc2, 0x9f, 0x19, 0x37, 0x4b, 0x35, 0x43, - 0x91, 0xf6, 0xad, 0x08, 0x83, 0x35, 0x2a, 0xf4, 0x2d, 0x28, 0x1c, 0xaa, 0x52, 0x54, 0x1c, 0x4c, - 0xea, 0x10, 0x19, 0x16, 0xb0, 0xf2, 0xc9, 0x20, 0xfc, 0xc2, 0x91, 0x34, 0xf4, 0x55, 0x28, 0xb1, - 0xde, 0x41, 0x94, 0xbd, 0xe5, 0x69, 0x46, 0xa9, 0x72, 0x3f, 0x46, 0x61, 0x9d, 0xce, 0x7c, 0x13, - 0x8a, 0x3b, 0x4e, 0x8b, 0xfa, 0xa2, 0x07, 0x7c, 0x02, 0xe6, 0x58, 0xa2, 0xc1, 0x89, 0x4e, 0x32, - 0xf4, 0xb2, 0x10, 0xcf, 0xdd, 0xcb, 0xb3, 0x3c, 0x5f, 0xb6, 0x31, 0xf9, 0xd8, 0xbd, 0x5e, 0xe5, - 0x40, 0x2c, 0x71, 0xd7, 0x97, 0x79, 0x81, 0xf0, 0xd3, 0xf7, 0x6b, 0x33, 0xef, 0xbe, 0x5f, 0x9b, - 0x79, 0xef, 0x7d, 0x55, 0x2c, 0xfc, 0x11, 0x00, 0x76, 0x0f, 0xbe, 0x47, 0x5a, 0x32, 0xec, 0xa6, - 0x7a, 0xd6, 0x0b, 0x5f, 0x93, 0xc5, 0xb3, 0x5e, 0x66, 0xa8, 0xe8, 0xd3, 0x70, 0x38, 0x41, 0x89, - 0xd6, 0xa1, 0x18, 0x3d, 0xd8, 0x29, 0xff, 0x58, 0x0a, 0xfd, 0x2d, 0x7a, 0xd5, 0xc3, 0x31, 0x4d, - 0x22, 0x07, 0xe4, 0xce, 0xcd, 0x01, 
0x0d, 0xc8, 0xf6, 0x1c, 0x5b, 0x35, 0xcc, 0x4f, 0x87, 0x39, - 0xf8, 0xce, 0x76, 0xf3, 0x6c, 0x50, 0x7b, 0x64, 0xd2, 0x3b, 0x79, 0xd0, 0xef, 0x12, 0x56, 0xbf, - 0xb3, 0xdd, 0xc4, 0x9c, 0x79, 0x5c, 0x40, 0x9a, 0x9d, 0x32, 0x20, 0x5d, 0x05, 0x68, 0xc7, 0xcf, - 0x0e, 0xf2, 0xbe, 0x47, 0x8e, 0xa8, 0x3d, 0x37, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xb7, 0xe6, - 0xaa, 0xfd, 0x67, 0x81, 0xd5, 0x91, 0x0f, 0x99, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, - 0xb0, 0x30, 0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x0e, 0x31, 0x56, 0x5a, 0x9c, 0x5a, 0xe9, - 0x45, 0xae, 0xb0, 0x39, 0x2c, 0x08, 0x8f, 0xca, 0x46, 0xdf, 0x85, 0x95, 0x10, 0x38, 0xda, 0xa6, - 0x8b, 0x80, 0x9d, 0x6d, 0xac, 0x9e, 0x0e, 0x6a, 0x2b, 0xcd, 0x89, 0x54, 0xf8, 0x3e, 0x12, 0x90, - 0x0d, 0xb3, 0xae, 0x2c, 0x70, 0x4b, 0xa2, 0x28, 0xf9, 0x5a, 0xba, 0x55, 0xc4, 0xde, 0x5f, 0xd7, - 0x0b, 0xdb, 0xe8, 0xc9, 0x45, 0xd5, 0xb4, 0x4a, 0x36, 0x7a, 0x0b, 0x4a, 0x96, 0xe7, 0xf9, 0x81, - 0x25, 0x1f, 0x0e, 0xca, 0x42, 0xd5, 0xc6, 0xd4, 0xaa, 0x36, 0x62, 0x19, 0x43, 0x85, 0xb4, 0x86, - 0xc1, 0xba, 0x2a, 0x74, 0x0f, 0x16, 0xfc, 0x7b, 0x1e, 0xa1, 0x98, 0x1c, 0x12, 0x4a, 0xbc, 0x16, - 0x61, 0xd5, 0x8a, 0xd0, 0xfe, 0x4c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x87, - 0xb5, 0xa0, 0x3a, 0x8f, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe7, 0xe3, 0xb7, 0xe6, - 0xad, 0x08, 0x8a, 0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x4f, 0x19, 0xd5, 0x25, 0x61, 0xe6, 0xb5, - 0x74, 0x66, 0x8e, 0x26, 0xb5, 0xb8, 0x82, 0x49, 0xe0, 0x70, 0x52, 0xcb, 0xca, 0x73, 0x50, 0xfa, - 0x94, 0xc5, 0x3d, 0x6f, 0x0e, 0x86, 0x0f, 0x64, 0xaa, 0xe6, 0xe0, 0x4f, 0x19, 0x98, 0x4f, 0x6e, - 0xe3, 0x50, 0x3a, 0xcc, 0xa7, 0x4a, 0x87, 0x61, 0x1b, 0x6a, 0x4c, 0x1c, 0x3a, 0x84, 0xf1, 0x39, - 0x3b, 0x31, 0x3e, 0xab, 0x30, 0x98, 0x7b, 0x90, 0x30, 0x58, 0x07, 0xe0, 0x75, 0x06, 0xf5, 0x5d, - 0x97, 0x50, 0x11, 0x01, 0x0b, 0x6a, 0xb8, 0x10, 0x41, 0xb1, 0x46, 0xc1, 0xab, 0xe1, 0x03, 0xd7, - 0x6f, 0x1d, 0x8b, 0x2d, 0x08, 0x6f, 0xaf, 0x88, 0x7d, 0x05, 0x59, 0x0d, 0x37, 0x46, 0xb0, 0x78, - 0x0c, 0x87, 0xd9, 0x87, 0x8b, 0x7b, 0x16, 0x0d, 0x1c, 0xcb, 0x8d, 0x6f, 0x8a, 0x68, 0x37, 0xde, - 0x18, 0x69, 0x66, 0x9e, 0x9e, 0xf6, 0xc6, 0xc5, 0x9b, 0x1f, 0xc3, 0xe2, 0x86, 0xc6, 0xfc, 0x9b, - 0x01, 0x97, 0xc6, 0xea, 0xfe, 0x0c, 0x9a, 0xa9, 0x37, 0x92, 0xcd, 0xd4, 0xf3, 0x29, 0x5f, 0x21, - 0xc7, 0x59, 0x3b, 0xa1, 0xb5, 0x9a, 0x83, 0xfc, 0x1e, 0x2f, 0x62, 0xcd, 0x0f, 0x0d, 0x28, 0x8b, - 0x5f, 0xd3, 0x3c, 0x02, 0xd7, 0x92, 0xb3, 0x81, 0xe2, 0xc3, 0x9b, 0x0b, 0x3c, 0x8c, 0x57, 0xe2, - 0x77, 0x0c, 0x48, 0x3e, 0xbf, 0xa2, 0x17, 0xe5, 0x15, 0x30, 0xa2, 0xf7, 0xd1, 0x29, 0xdd, 0xff, - 0x85, 0x49, 0xdd, 0xe4, 0x85, 0x54, 0x0f, 0x8d, 0x4f, 0x42, 0x11, 0xfb, 0x7e, 0xb0, 0x67, 0x05, - 0x47, 0x8c, 0xef, 0x5d, 0x97, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, - 0x37, 0xe0, 0xd2, 0xc4, 0x91, 0x0f, 0x8f, 0x22, 0xad, 0xe8, 0x4b, 0xad, 0x28, 0x72, 0xe4, 0x98, - 0x0e, 0x6b, 0x54, 0xbc, 0x0d, 0x4c, 0xcc, 0x89, 0x86, 0xdb, 0xc0, 0x84, 0x36, 0x9c, 0xa4, 0x35, - 0xff, 0x95, 0x01, 0x35, 0x63, 0xf9, 0x1f, 0x3b, 0xfd, 0xe3, 0x43, 0x13, 0x9e, 0xf9, 0xe4, 0x84, - 0x27, 0x1a, 0xe7, 0x68, 0x23, 0x8e, 0xec, 0xfd, 0x47, 0x1c, 0xe8, 0xd9, 0x68, 0x6a, 0x22, 0x7d, - 0x68, 0x35, 0x39, 0x35, 0x39, 0x1b, 0xd4, 0xca, 0x4a, 0x78, 0x72, 0x8a, 0xf2, 0x1a, 0xcc, 0xd9, - 0x24, 0xb0, 0x1c, 0x57, 0xb6, 0x74, 0xa9, 0xe7, 0x00, 0x52, 0x58, 0x53, 0xb2, 0x36, 0x4a, 0xdc, - 0x26, 0xf5, 0x81, 0x43, 0x81, 0x3c, 0x60, 0xb7, 0x7c, 0x5b, 0x76, 0x24, 0xf9, 0x38, 0x60, 0x6f, - 0xfa, 0x36, 0xc1, 0x02, 0x63, 0xbe, 0x6b, 0x40, 0x49, 0x4a, 
0xda, 0xb4, 0x7a, 0x8c, 0xa0, 0x2b, - 0xd1, 0x2a, 0xe4, 0x71, 0x5f, 0xd2, 0xc7, 0x63, 0x67, 0x83, 0x5a, 0x51, 0x90, 0x89, 0x66, 0x66, - 0xcc, 0x18, 0x28, 0x73, 0xce, 0x1e, 0x3d, 0x0a, 0x79, 0x71, 0x81, 0xd4, 0x66, 0xc6, 0x73, 0x3e, - 0x0e, 0xc4, 0x12, 0x67, 0x7e, 0x9c, 0x81, 0x4a, 0x62, 0x71, 0x29, 0xfa, 0x82, 0xe8, 0xf5, 0x33, - 0x93, 0xe2, 0x45, 0x7d, 0xf2, 0x54, 0x5d, 0xa5, 0xaf, 0xd9, 0x07, 0x49, 0x5f, 0xdf, 0x86, 0xd9, - 0x16, 0xdf, 0xa3, 0xf0, 0x4f, 0x1a, 0x57, 0xa6, 0x39, 0x4e, 0xb1, 0xbb, 0xb1, 0x37, 0x8a, 0x4f, - 0x86, 0x95, 0x40, 0x74, 0x13, 0x96, 0x28, 0x09, 0x68, 0x7f, 0xe3, 0x30, 0x20, 0x54, 0x7f, 0x07, - 0xc8, 0xc7, 0xd5, 0x37, 0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, - 0x34, 0xd9, 0xc2, 0x50, 0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, - 0xed, 0xb6, 0x8e, 0x3c, 0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x94, 0x83, 0x93, 0x22, 0x4c, 0x17, - 0x72, 0x9f, 0x61, 0x27, 0xf9, 0x1d, 0x28, 0xc6, 0xb5, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, - 0xee, 0xf1, 0x61, 0x8f, 0x7a, 0x4e, 0x95, 0x94, 0xac, 0xbd, 0x32, 0x69, 0x6a, 0x2f, 0x31, 0x1f, - 0xbd, 0xd3, 0xb5, 0x1f, 0x70, 0x3e, 0x9a, 0x79, 0x90, 0xcc, 0x97, 0x9d, 0x32, 0xf3, 0x5d, 0x05, - 0xf9, 0x1f, 0x12, 0x9e, 0x64, 0x64, 0x01, 0xa1, 0x25, 0x19, 0x3d, 0xff, 0x6b, 0xc3, 0x81, 0x1f, - 0x1b, 0x00, 0xe2, 0x15, 0xee, 0xc6, 0x09, 0xf1, 0x82, 0x14, 0x93, 0xf8, 0x3b, 0x30, 0xeb, 0x4b, - 0x8f, 0x94, 0x33, 0xd2, 0x29, 0x9f, 0x7a, 0xa3, 0x8b, 0x24, 0x7d, 0x12, 0x2b, 0x61, 0x8d, 0x97, - 0x3f, 0xf8, 0x64, 0x75, 0xe6, 0xc3, 0x4f, 0x56, 0x67, 0x3e, 0xfa, 0x64, 0x75, 0xe6, 0xed, 0xd3, - 0x55, 0xe3, 0x83, 0xd3, 0x55, 0xe3, 0xc3, 0xd3, 0x55, 0xe3, 0xa3, 0xd3, 0x55, 0xe3, 0xe3, 0xd3, - 0x55, 0xe3, 0xdd, 0x7f, 0xac, 0xce, 0xbc, 0xf6, 0x58, 0x9a, 0xff, 0xe6, 0xfd, 0x37, 0x00, 0x00, - 0xff, 0xff, 0x0b, 0x4d, 0x51, 0xc5, 0xdb, 0x27, 0x00, 0x00, + 0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85, + 0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa, + 0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0xec, 0x1c, 0x5f, 0x63, 0x75, 0xc7, 0x5f, 0x3f, 0xee, 0x1d, 0x10, + 0xea, 0x91, 0x80, 0xb0, 0xf5, 0x13, 0xe2, 0xd9, 0x3e, 0x5d, 0x57, 0x08, 0xab, 0xeb, 0x74, 0xac, + 0xd6, 0x91, 0xe3, 0x11, 0xda, 0x5f, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xad, 0x77, 0x48, 0x60, 0xad, + 0x9f, 0x5c, 0x59, 0x6f, 0x13, 0x8f, 0x50, 0x2b, 0x20, 0x76, 0xbd, 0x4b, 0xfd, 0xc0, 0x47, 0x8f, + 0x49, 0xae, 0xba, 0xce, 0x55, 0xef, 0x1e, 0xb7, 0x39, 0x80, 0xd5, 0x39, 0x57, 0xfd, 0xe4, 0xca, + 0xca, 0x53, 0x6d, 0x27, 0x38, 0xea, 0x1d, 0xd4, 0x5b, 0x7e, 0x67, 0xbd, 0xed, 0xb7, 0xfd, 0x75, + 0xc1, 0x7c, 0xd0, 0x3b, 0x14, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x57, 0x26, 0x9a, 0x42, 0x7b, + 0x5e, 0xe0, 0x74, 0xc8, 0xb0, 0x15, 0x2b, 0xcf, 0x9e, 0xc7, 0xc0, 0x5a, 0x47, 0xa4, 0x63, 0x0d, + 0xf3, 0x99, 0x7f, 0xca, 0x42, 0x61, 0x63, 0x6f, 0xfb, 0x26, 0xf5, 0x7b, 0x5d, 0xb4, 0x06, 0x39, + 0xcf, 0xea, 0x90, 0xaa, 0xb1, 0x66, 0x5c, 0x2e, 0x36, 0xca, 0x1f, 0x0c, 0x6a, 0x33, 0xa7, 0x83, + 0x5a, 0xee, 0x55, 0xab, 0x43, 0xb0, 0xc0, 0x20, 0x17, 0x0a, 0x27, 0x84, 0x32, 0xc7, 0xf7, 0x58, + 0x35, 0xb3, 0x96, 0xbd, 0x5c, 0xba, 0xfa, 0x62, 0x3d, 0xcd, 0xfa, 0xeb, 0x42, 0xc1, 0x5d, 0xc9, + 0xba, 0xe5, 0xd3, 0xa6, 0xc3, 0x5a, 0xfe, 0x09, 0xa1, 0xfd, 0xc6, 0xa2, 0xd2, 0x52, 0x50, 0x48, + 0x86, 0x23, 0x0d, 0xe8, 0x47, 0x06, 0x2c, 0x76, 0x29, 0x39, 0x24, 0x94, 0x12, 0x5b, 0xe1, 0xab, + 0xd9, 0x35, 0xe3, 0x21, 0xa8, 0xad, 0x2a, 0xb5, 0x8b, 0x7b, 0x43, 0xf2, 0xf1, 0x88, 0x46, 0xf4, + 0x6b, 0x03, 0x56, 0x18, 
0xa1, 0x27, 0x84, 0x6e, 0xd8, 0x36, 0x25, 0x8c, 0x35, 0xfa, 0x9b, 0xae, + 0x43, 0xbc, 0x60, 0x73, 0xbb, 0x89, 0x59, 0x35, 0x27, 0xf6, 0xe1, 0xeb, 0xe9, 0x0c, 0xda, 0x9f, + 0x24, 0xa7, 0x61, 0x2a, 0x8b, 0x56, 0x26, 0x92, 0x30, 0x7c, 0x1f, 0x33, 0xcc, 0x43, 0x28, 0x87, + 0x07, 0x79, 0xcb, 0x61, 0x01, 0xba, 0x0b, 0xb3, 0x6d, 0xfe, 0xc1, 0xaa, 0x86, 0x30, 0xb0, 0x9e, + 0xce, 0xc0, 0x50, 0x46, 0x63, 0x5e, 0xd9, 0x33, 0x2b, 0x3e, 0x19, 0x56, 0xd2, 0xcc, 0x9f, 0xe5, + 0xa0, 0xb4, 0xb1, 0xb7, 0x8d, 0x09, 0xf3, 0x7b, 0xb4, 0x45, 0x52, 0x38, 0xcd, 0x35, 0x28, 0x33, + 0xc7, 0x6b, 0xf7, 0x5c, 0x8b, 0x72, 0x68, 0x75, 0x56, 0x50, 0x2e, 0x2b, 0xca, 0xf2, 0xbe, 0x86, + 0xc3, 0x09, 0x4a, 0x74, 0x15, 0x80, 0x4b, 0x60, 0x5d, 0xab, 0x45, 0xec, 0x6a, 0x66, 0xcd, 0xb8, + 0x5c, 0x68, 0x20, 0xc5, 0x07, 0xaf, 0x46, 0x18, 0xac, 0x51, 0xa1, 0x47, 0x21, 0x2f, 0x2c, 0xad, + 0x16, 0x84, 0x9a, 0x8a, 0x22, 0xcf, 0x8b, 0x65, 0x60, 0x89, 0x43, 0x4f, 0xc0, 0x9c, 0xf2, 0xb2, + 0x6a, 0x51, 0x90, 0x2d, 0x28, 0xb2, 0xb9, 0xd0, 0x0d, 0x42, 0x3c, 0x5f, 0xdf, 0xb1, 0xe3, 0xd9, + 0xc2, 0xef, 0xb4, 0xf5, 0xbd, 0xe2, 0x78, 0x36, 0x16, 0x18, 0x74, 0x0b, 0xf2, 0x27, 0x84, 0x1e, + 0x70, 0x4f, 0xe0, 0xae, 0xf9, 0xe5, 0x74, 0x1b, 0x7d, 0x97, 0xb3, 0x34, 0x8a, 0xdc, 0x34, 0xf1, + 0x13, 0x4b, 0x21, 0xa8, 0x0e, 0xc0, 0x8e, 0x7c, 0x1a, 0x88, 0xe5, 0x55, 0xf3, 0x6b, 0xd9, 0xcb, + 0xc5, 0xc6, 0x3c, 0x5f, 0xef, 0x7e, 0x04, 0xc5, 0x1a, 0x05, 0xa7, 0x6f, 0x59, 0x01, 0x69, 0xfb, + 0xd4, 0x21, 0xac, 0x3a, 0x17, 0xd3, 0x6f, 0x46, 0x50, 0xac, 0x51, 0xa0, 0x97, 0x01, 0xb1, 0xc0, + 0xa7, 0x56, 0x9b, 0xa8, 0xa5, 0xbe, 0x64, 0xb1, 0xa3, 0x2a, 0x88, 0xd5, 0xad, 0xa8, 0xd5, 0xa1, + 0xfd, 0x11, 0x0a, 0x3c, 0x86, 0xcb, 0xfc, 0x9d, 0x01, 0x0b, 0x9a, 0x2f, 0x08, 0xbf, 0xbb, 0x06, + 0xe5, 0xb6, 0x76, 0xeb, 0x94, 0x5f, 0x44, 0xa7, 0xad, 0xdf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, + 0x54, 0x49, 0x0a, 0xa3, 0xcb, 0x95, 0xd4, 0x4e, 0x1b, 0xda, 0x10, 0x6b, 0xd2, 0x80, 0x0c, 0xc7, + 0x92, 0xcd, 0x7f, 0x18, 0xc2, 0x81, 0xc3, 0x78, 0x83, 0x2e, 0x6b, 0x31, 0xcd, 0x10, 0xdb, 0x57, + 0x9e, 0x10, 0x8f, 0xce, 0x09, 0x04, 0x99, 0xff, 0x8b, 0x40, 0x70, 0xbd, 0xf0, 0xcb, 0xf7, 0x6a, + 0x33, 0x6f, 0xff, 0x6d, 0x6d, 0xc6, 0xfc, 0x85, 0x01, 0xe5, 0x8d, 0x6e, 0xd7, 0xed, 0xef, 0x76, + 0x03, 0xb1, 0x00, 0x13, 0x66, 0x6d, 0xda, 0xc7, 0x3d, 0x4f, 0x2d, 0x14, 0xf8, 0xfd, 0x6e, 0x0a, + 0x08, 0x56, 0x18, 0x7e, 0x7f, 0x0e, 0x7d, 0xda, 0x22, 0xea, 0xba, 0x45, 0xf7, 0x67, 0x8b, 0x03, + 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x74, 0x88, 0x6b, 0xef, 0x58, 0x9e, 0xd5, 0x26, 0x54, 0x5d, 0x8e, + 0x68, 0xeb, 0xb7, 0x34, 0x1c, 0x4e, 0x50, 0x9a, 0xff, 0xc9, 0x40, 0x71, 0xd3, 0xf7, 0x6c, 0x27, + 0x50, 0x97, 0x2b, 0xe8, 0x77, 0x47, 0x82, 0xc7, 0xed, 0x7e, 0x97, 0x60, 0x81, 0x41, 0xcf, 0xc1, + 0x2c, 0x0b, 0xac, 0xa0, 0xc7, 0x84, 0x3d, 0xc5, 0xc6, 0x23, 0x61, 0x58, 0xda, 0x17, 0xd0, 0xb3, + 0x41, 0x6d, 0x21, 0x12, 0x27, 0x41, 0x58, 0x31, 0x70, 0x4f, 0xf7, 0x0f, 0xc4, 0x46, 0xd9, 0x37, + 0xe5, 0xb3, 0x17, 0xbe, 0x1f, 0xd9, 0xd8, 0xd3, 0x77, 0x47, 0x28, 0xf0, 0x18, 0x2e, 0x74, 0x02, + 0xc8, 0xb5, 0x58, 0x70, 0x9b, 0x5a, 0x1e, 0x13, 0xba, 0x6e, 0x3b, 0x1d, 0xa2, 0x2e, 0xfc, 0x97, + 0xd2, 0x9d, 0x38, 0xe7, 0x88, 0xf5, 0xde, 0x1a, 0x91, 0x86, 0xc7, 0x68, 0x40, 0x8f, 0xc3, 0x2c, + 0x25, 0x16, 0xf3, 0xbd, 0x6a, 0x5e, 0x2c, 0x3f, 0x8a, 0xca, 0x58, 0x40, 0xb1, 0xc2, 0xf2, 0x80, + 0xd6, 0x21, 0x8c, 0x59, 0xed, 0x30, 0xbc, 0x46, 0x01, 0x6d, 0x47, 0x82, 0x71, 0x88, 0x37, 0x7f, + 0x6b, 0x40, 0x65, 0x93, 0x12, 0x2b, 0x20, 0xd3, 0xb8, 0xc5, 0xa7, 0x3e, 0x71, 0xb4, 0x01, 0x0b, + 0xe2, 0xfb, 0xae, 0xe5, 0x3a, 0xb6, 0x3c, 0x83, 
0x9c, 0x60, 0xfe, 0xbc, 0x62, 0x5e, 0xd8, 0x4a, + 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x93, 0x2c, 0x54, 0x9a, 0xc4, 0x25, 0xb1, 0xc9, 0x5b, 0x80, 0xda, + 0xd4, 0x6a, 0x91, 0x3d, 0x42, 0x1d, 0xdf, 0xde, 0x27, 0x2d, 0xdf, 0xb3, 0x99, 0x70, 0xa3, 0x6c, + 0xe3, 0x73, 0x7c, 0x7f, 0x6f, 0x8e, 0x60, 0xf1, 0x18, 0x0e, 0xe4, 0x42, 0xa5, 0x4b, 0xc5, 0x6f, + 0xb1, 0xe7, 0xd2, 0xcb, 0x4a, 0x57, 0xbf, 0x92, 0xee, 0x48, 0xf7, 0x74, 0xd6, 0xc6, 0xd2, 0xe9, + 0xa0, 0x56, 0x49, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x06, 0x2c, 0xfa, 0xb4, 0x7b, 0x64, 0x79, 0x4d, + 0xd2, 0x25, 0x9e, 0x4d, 0xbc, 0x80, 0x89, 0x8d, 0x2c, 0x34, 0x96, 0x79, 0x2e, 0xb2, 0x3b, 0x84, + 0xc3, 0x23, 0xd4, 0xe8, 0x35, 0x58, 0xea, 0x52, 0xbf, 0x6b, 0xb5, 0xc5, 0xc6, 0xec, 0xf9, 0xae, + 0xd3, 0xea, 0xab, 0xed, 0x7c, 0xf2, 0x74, 0x50, 0x5b, 0xda, 0x1b, 0x46, 0x9e, 0x0d, 0x6a, 0x17, + 0xc4, 0xd6, 0x71, 0x48, 0x8c, 0xc4, 0xa3, 0x62, 0x34, 0x37, 0xc8, 0x4f, 0x72, 0x03, 0x73, 0x1b, + 0x0a, 0xcd, 0x9e, 0xba, 0x13, 0x2f, 0x40, 0xc1, 0x56, 0xbf, 0xd5, 0xce, 0x87, 0x97, 0x33, 0xa2, + 0x39, 0x1b, 0xd4, 0x2a, 0x3c, 0xfd, 0xac, 0x87, 0x00, 0x1c, 0xb1, 0x98, 0x8f, 0x43, 0x41, 0x1c, + 0x3c, 0xbb, 0x7b, 0x05, 0x2d, 0x42, 0x16, 0x5b, 0xf7, 0x84, 0x94, 0x32, 0xe6, 0x3f, 0xb5, 0x28, + 0xb6, 0x0b, 0x70, 0x93, 0x04, 0xe1, 0xc1, 0x6f, 0xc0, 0x42, 0x18, 0xca, 0x93, 0x2f, 0x4c, 0xe4, + 0x4d, 0x38, 0x89, 0xc6, 0xc3, 0xf4, 0xe6, 0xeb, 0x50, 0x14, 0xaf, 0x10, 0x7f, 0xc2, 0xe3, 0x74, + 0xc1, 0xb8, 0x4f, 0xba, 0x10, 0xe6, 0x00, 0x99, 0x49, 0x39, 0x80, 0x66, 0xae, 0x0b, 0x15, 0xc9, + 0x1b, 0x26, 0x48, 0xa9, 0x34, 0x3c, 0x09, 0x85, 0xd0, 0x4c, 0xa5, 0x25, 0x4a, 0x8c, 0x43, 0x41, + 0x38, 0xa2, 0xd0, 0xb4, 0x1d, 0x41, 0xe2, 0x45, 0x4d, 0xa7, 0x4c, 0xcb, 0x7e, 0x32, 0xf7, 0xcf, + 0x7e, 0x34, 0x4d, 0x3f, 0x84, 0xea, 0xa4, 0x6c, 0xfa, 0x01, 0xde, 0xfc, 0xf4, 0xa6, 0x98, 0xef, + 0x18, 0xb0, 0xa8, 0x4b, 0x4a, 0x7f, 0x7c, 0xe9, 0x95, 0x9c, 0x9f, 0xed, 0x69, 0x3b, 0xf2, 0x2b, + 0x03, 0x96, 0x13, 0x4b, 0x9b, 0xea, 0xc4, 0xa7, 0x30, 0x4a, 0x77, 0x8e, 0xec, 0x14, 0xce, 0xf1, + 0x97, 0x0c, 0x54, 0x6e, 0x59, 0x07, 0xc4, 0xdd, 0x27, 0x2e, 0x69, 0x05, 0x3e, 0x45, 0x3f, 0x80, + 0x52, 0xc7, 0x0a, 0x5a, 0x47, 0x02, 0x1a, 0x56, 0x06, 0xcd, 0x74, 0xc1, 0x2e, 0x21, 0xa9, 0xbe, + 0x13, 0x8b, 0xb9, 0xe1, 0x05, 0xb4, 0xdf, 0xb8, 0xa0, 0x4c, 0x2a, 0x69, 0x18, 0xac, 0x6b, 0x13, + 0xe5, 0x9c, 0xf8, 0xbe, 0xf1, 0x56, 0x97, 0xa7, 0x2d, 0xd3, 0x57, 0x91, 0x09, 0x13, 0x30, 0x79, + 0xb3, 0xe7, 0x50, 0xd2, 0x21, 0x5e, 0x10, 0x97, 0x73, 0x3b, 0x43, 0xf2, 0xf1, 0x88, 0xc6, 0x95, + 0x17, 0x61, 0x71, 0xd8, 0x78, 0x1e, 0x7f, 0x8e, 0x49, 0x5f, 0x9e, 0x17, 0xe6, 0x3f, 0xd1, 0x32, + 0xe4, 0x4f, 0x2c, 0xb7, 0xa7, 0x6e, 0x23, 0x96, 0x1f, 0xd7, 0x33, 0xd7, 0x0c, 0xf3, 0x37, 0x06, + 0x54, 0x27, 0x19, 0x82, 0xbe, 0xa8, 0x09, 0x6a, 0x94, 0x94, 0x55, 0xd9, 0x57, 0x48, 0x5f, 0x4a, + 0xbd, 0x01, 0x05, 0xbf, 0xcb, 0x73, 0x0a, 0x9f, 0xaa, 0x53, 0x7f, 0x22, 0x3c, 0xc9, 0x5d, 0x05, + 0x3f, 0x1b, 0xd4, 0x2e, 0x26, 0xc4, 0x87, 0x08, 0x1c, 0xb1, 0xf2, 0x48, 0x2d, 0xec, 0xe1, 0xaf, + 0x47, 0x14, 0xa9, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0xf7, 0x06, 0xe4, 0x44, 0x42, 0xfe, 0x3a, + 0x14, 0xf8, 0xfe, 0xd9, 0x56, 0x60, 0x09, 0xbb, 0x52, 0x97, 0x82, 0x9c, 0x7b, 0x87, 0x04, 0x56, + 0xec, 0x6d, 0x21, 0x04, 0x47, 0x12, 0x11, 0x86, 0xbc, 0x13, 0x90, 0x4e, 0x78, 0x90, 0x4f, 0x4d, + 0x14, 0xad, 0x1a, 0x11, 0x75, 0x6c, 0xdd, 0xbb, 0xf1, 0x56, 0x40, 0x3c, 0x7e, 0x18, 0xf1, 0xd5, + 0xd8, 0xe6, 0x32, 0xb0, 0x14, 0x65, 0xfe, 0xcb, 0x80, 0x48, 0x15, 0x77, 0x7e, 0x46, 0xdc, 0xc3, + 0x5b, 0x8e, 0x77, 0xac, 0xb6, 0x35, 0x32, 0x67, 0x5f, 0xc1, 0x71, 0x44, 
0x31, 0xee, 0x79, 0xc8, + 0x4c, 0xf7, 0x3c, 0x70, 0x85, 0x2d, 0xdf, 0x0b, 0x1c, 0xaf, 0x37, 0x72, 0xdb, 0x36, 0x15, 0x1c, + 0x47, 0x14, 0x3c, 0x11, 0xa1, 0xa4, 0x63, 0x39, 0x9e, 0xe3, 0xb5, 0xf9, 0x22, 0x36, 0xfd, 0x9e, + 0x17, 0x88, 0x17, 0x59, 0x25, 0x22, 0x78, 0x04, 0x8b, 0xc7, 0x70, 0x98, 0xff, 0xce, 0x41, 0x89, + 0xaf, 0x39, 0x7c, 0xe7, 0x9e, 0x87, 0x8a, 0xab, 0x7b, 0x81, 0x5a, 0xfb, 0x45, 0x65, 0x4a, 0xf2, + 0x5e, 0xe3, 0x24, 0x2d, 0x67, 0x16, 0x29, 0x54, 0xc4, 0x9c, 0x49, 0x32, 0x6f, 0xe9, 0x48, 0x9c, + 0xa4, 0xe5, 0xd1, 0xeb, 0x1e, 0xbf, 0x1f, 0x2a, 0x33, 0x89, 0x8e, 0xe8, 0x9b, 0x1c, 0x88, 0x25, + 0x0e, 0xed, 0xc0, 0x05, 0xcb, 0x75, 0xfd, 0x7b, 0x02, 0xd8, 0xf0, 0xfd, 0xe3, 0x8e, 0x45, 0x8f, + 0x99, 0x28, 0xa6, 0x0b, 0x8d, 0x2f, 0x28, 0x96, 0x0b, 0x1b, 0xa3, 0x24, 0x78, 0x1c, 0xdf, 0xb8, + 0x63, 0xcb, 0x4d, 0x79, 0x6c, 0x47, 0xb0, 0x3c, 0x04, 0x12, 0xb7, 0x5c, 0x55, 0xb6, 0xcf, 0x28, + 0x39, 0xcb, 0x78, 0x0c, 0xcd, 0xd9, 0x04, 0x38, 0x1e, 0x2b, 0x11, 0x5d, 0x87, 0x79, 0xee, 0xc9, + 0x7e, 0x2f, 0x08, 0xf3, 0xce, 0xbc, 0x38, 0x6e, 0x74, 0x3a, 0xa8, 0xcd, 0xdf, 0x4e, 0x60, 0xf0, + 0x10, 0x25, 0xdf, 0x5c, 0xd7, 0xe9, 0x38, 0x41, 0x75, 0x4e, 0xb0, 0x44, 0x9b, 0x7b, 0x8b, 0x03, + 0xb1, 0xc4, 0x25, 0x3c, 0xb0, 0x70, 0xae, 0x07, 0x6e, 0xc2, 0x12, 0x23, 0x9e, 0xbd, 0xed, 0x39, + 0x81, 0x63, 0xb9, 0x37, 0x4e, 0x44, 0x56, 0x59, 0x12, 0x07, 0x71, 0x91, 0xa7, 0x84, 0xfb, 0xc3, + 0x48, 0x3c, 0x4a, 0x6f, 0xfe, 0x39, 0x0b, 0x48, 0x26, 0xec, 0xb6, 0x4c, 0xca, 0x64, 0x5c, 0xe4, + 0x65, 0x85, 0x4a, 0xf8, 0x8d, 0xa1, 0xb2, 0x42, 0xe5, 0xfa, 0x21, 0x1e, 0xed, 0x40, 0x51, 0xc6, + 0xa7, 0xf8, 0xce, 0xad, 0x2b, 0xe2, 0xe2, 0x6e, 0x88, 0x38, 0x1b, 0xd4, 0x56, 0x12, 0x6a, 0x22, + 0x8c, 0x28, 0xf9, 0x62, 0x09, 0xe8, 0x2a, 0x80, 0xd5, 0x75, 0xf4, 0xa6, 0x5f, 0x31, 0x6e, 0xfd, + 0xc4, 0xe5, 0x3b, 0xd6, 0xa8, 0xd0, 0x4b, 0x90, 0x0b, 0x3e, 0x5d, 0x59, 0x56, 0x10, 0x55, 0x27, + 0x2f, 0xc2, 0x84, 0x04, 0xae, 0x5d, 0x5c, 0x0a, 0xc6, 0xcd, 0x52, 0x15, 0x55, 0xa4, 0x7d, 0x2b, + 0xc2, 0x60, 0x8d, 0x0a, 0x7d, 0x0b, 0x0a, 0x87, 0x2a, 0x9f, 0x15, 0xa7, 0x9b, 0x3a, 0xce, 0x86, + 0x59, 0xb0, 0xec, 0x3b, 0x84, 0x5f, 0x38, 0x92, 0x86, 0xbe, 0x0a, 0x25, 0xd6, 0x3b, 0x88, 0x52, + 0x00, 0xe9, 0x12, 0xd1, 0x7b, 0xbb, 0x1f, 0xa3, 0xb0, 0x4e, 0x67, 0xbe, 0x09, 0xc5, 0x1d, 0xa7, + 0x45, 0x7d, 0x51, 0x48, 0x3e, 0x01, 0x73, 0x2c, 0x51, 0x25, 0x45, 0x27, 0x19, 0xba, 0x6a, 0x88, + 0xe7, 0x3e, 0xea, 0x59, 0x9e, 0x2f, 0x6b, 0xa1, 0x7c, 0xec, 0xa3, 0xaf, 0x72, 0x20, 0x96, 0xb8, + 0xeb, 0xcb, 0x3c, 0xcb, 0xf8, 0xe9, 0xfb, 0xb5, 0x99, 0x77, 0xdf, 0xaf, 0xcd, 0xbc, 0xf7, 0xbe, + 0xca, 0x38, 0xfe, 0x00, 0x00, 0xbb, 0x07, 0xdf, 0x23, 0x2d, 0x19, 0xbb, 0x53, 0xf5, 0x06, 0xc3, + 0x96, 0xb4, 0xe8, 0x0d, 0x66, 0x86, 0x32, 0x47, 0x0d, 0x87, 0x13, 0x94, 0x68, 0x1d, 0x8a, 0x51, + 0xd7, 0x4f, 0xf9, 0xc7, 0x52, 0xe8, 0x6f, 0x51, 0x6b, 0x10, 0xc7, 0x34, 0x89, 0x87, 0x24, 0x77, + 0xee, 0x43, 0xd2, 0x80, 0x6c, 0xcf, 0xb1, 0x55, 0xd5, 0xfd, 0x74, 0xf8, 0x90, 0xdf, 0xd9, 0x6e, + 0x9e, 0x0d, 0x6a, 0x8f, 0x4c, 0x6a, 0xb6, 0x07, 0xfd, 0x2e, 0x61, 0xf5, 0x3b, 0xdb, 0x4d, 0xcc, + 0x99, 0xc7, 0x45, 0xb5, 0xd9, 0x29, 0xa3, 0xda, 0x55, 0x80, 0x76, 0xdc, 0xbb, 0x90, 0x41, 0x23, + 0x72, 0x44, 0xad, 0x67, 0xa1, 0x51, 0x21, 0x06, 0x4b, 0x2d, 0x5e, 0xdf, 0xab, 0x1e, 0x02, 0x0b, + 0xac, 0x8e, 0xec, 0x86, 0x4e, 0x77, 0x27, 0x2e, 0x29, 0x35, 0x4b, 0x9b, 0xc3, 0xc2, 0xf0, 0xa8, + 0x7c, 0xe4, 0xc3, 0x92, 0xad, 0xca, 0xcc, 0x58, 0x69, 0x71, 0x6a, 0xa5, 0x22, 0x62, 0x35, 0x87, + 0x05, 0xe1, 0x51, 0xd9, 0xe8, 0xbb, 0xb0, 0x12, 0x02, 0x47, 0x6b, 0x7d, 0x11, 0xf5, 0xb3, 0x8d, + 
0xd5, 0xd3, 0x41, 0x6d, 0xa5, 0x39, 0x91, 0x0a, 0xdf, 0x47, 0x02, 0xb2, 0x61, 0xd6, 0x95, 0x59, + 0x72, 0x49, 0x64, 0x36, 0x5f, 0x4b, 0xb7, 0x8a, 0xd8, 0xfb, 0xeb, 0x7a, 0x76, 0x1c, 0xf5, 0x6d, + 0x54, 0x62, 0xac, 0x64, 0xa3, 0xb7, 0xa0, 0x64, 0x79, 0x9e, 0x1f, 0x58, 0xb2, 0xfb, 0x50, 0x16, + 0xaa, 0x36, 0xa6, 0x56, 0xb5, 0x11, 0xcb, 0x18, 0xca, 0xc6, 0x35, 0x0c, 0xd6, 0x55, 0xa1, 0x7b, + 0xb0, 0xe0, 0xdf, 0xf3, 0x08, 0xc5, 0xe4, 0x90, 0x50, 0xe2, 0xb5, 0x08, 0xab, 0x56, 0x84, 0xf6, + 0x67, 0x52, 0x6a, 0x4f, 0x30, 0xc7, 0x2e, 0x9d, 0x84, 0x33, 0x3c, 0xac, 0x05, 0xd5, 0x79, 0x6c, + 0xf5, 0x2c, 0xd7, 0xf9, 0x3e, 0xa1, 0xac, 0x3a, 0x1f, 0x37, 0xac, 0xb7, 0x22, 0x28, 0xd6, 0x28, + 0x50, 0x0f, 0x2a, 0x1d, 0xfd, 0xc9, 0xa8, 0x2e, 0x09, 0x33, 0xaf, 0xa5, 0x33, 0x73, 0xf4, 0x51, + 0x8b, 0xd3, 0xa0, 0x04, 0x0e, 0x27, 0xb5, 0xac, 0x3c, 0x07, 0xa5, 0x4f, 0x59, 0x21, 0xf0, 0x0a, + 0x63, 0xf8, 0x40, 0xa6, 0xaa, 0x30, 0xfe, 0x98, 0x81, 0xf9, 0xe4, 0x36, 0x0e, 0x3d, 0x87, 0xf9, + 0x54, 0xcf, 0x61, 0x58, 0xcb, 0x1a, 0x13, 0x27, 0x17, 0x61, 0x7c, 0xce, 0x4e, 0x8c, 0xcf, 0x2a, + 0x0c, 0xe6, 0x1e, 0x24, 0x0c, 0xd6, 0x01, 0x78, 0xb2, 0x42, 0x7d, 0xd7, 0x25, 0x54, 0x44, 0xc0, + 0x82, 0x9a, 0x50, 0x44, 0x50, 0xac, 0x51, 0xf0, 0x94, 0xfa, 0xc0, 0xf5, 0x5b, 0xc7, 0x62, 0x0b, + 0xc2, 0xdb, 0x2b, 0x62, 0x5f, 0x41, 0xa6, 0xd4, 0x8d, 0x11, 0x2c, 0x1e, 0xc3, 0x61, 0xf6, 0xe1, + 0xe2, 0x9e, 0x45, 0x79, 0x92, 0x13, 0xdf, 0x14, 0x51, 0xb3, 0xbc, 0x31, 0x52, 0x11, 0x3d, 0x3d, + 0xed, 0x8d, 0x8b, 0x37, 0x3f, 0x86, 0xc5, 0x55, 0x91, 0xf9, 0x57, 0x03, 0x2e, 0x8d, 0xd5, 0xfd, + 0x19, 0x54, 0x64, 0x6f, 0x24, 0x2b, 0xb2, 0xe7, 0x53, 0xb6, 0x32, 0xc7, 0x59, 0x3b, 0xa1, 0x3e, + 0x9b, 0x83, 0xfc, 0x1e, 0xcf, 0x84, 0xcd, 0x0f, 0x0d, 0x28, 0x8b, 0x5f, 0xd3, 0x74, 0x92, 0x6b, + 0xc9, 0x01, 0x43, 0xf1, 0xe1, 0x0d, 0x17, 0x1e, 0x46, 0xab, 0xf9, 0x1d, 0x03, 0x92, 0x3d, 0x5c, + 0xf4, 0xa2, 0xbc, 0x02, 0x46, 0xd4, 0x64, 0x9d, 0xd2, 0xfd, 0x5f, 0x98, 0x54, 0x92, 0x5e, 0x48, + 0xd5, 0xad, 0x7c, 0x12, 0x8a, 0xd8, 0xf7, 0x83, 0x3d, 0x2b, 0x38, 0x62, 0x7c, 0xef, 0xba, 0xfc, + 0x87, 0xda, 0x5e, 0xb1, 0x77, 0x02, 0x83, 0x25, 0xdc, 0xfc, 0xb9, 0x01, 0x97, 0x26, 0xce, 0x8d, + 0x78, 0x14, 0x69, 0x45, 0x5f, 0x6a, 0x45, 0x91, 0x23, 0xc7, 0x74, 0x58, 0xa3, 0xe2, 0xb5, 0x64, + 0x62, 0xd8, 0x34, 0x5c, 0x4b, 0x26, 0xb4, 0xe1, 0x24, 0xad, 0xf9, 0xcf, 0x0c, 0xa8, 0x41, 0xcd, + 0xff, 0xd8, 0xe9, 0x1f, 0x1f, 0x1a, 0x13, 0xcd, 0x27, 0xc7, 0x44, 0xd1, 0x4c, 0x48, 0x9b, 0x93, + 0x64, 0xef, 0x3f, 0x27, 0x41, 0xcf, 0x46, 0xa3, 0x17, 0xe9, 0x43, 0xab, 0xc9, 0xd1, 0xcb, 0xd9, + 0xa0, 0x56, 0x56, 0xc2, 0x93, 0xa3, 0x98, 0xd7, 0x60, 0xce, 0x26, 0x81, 0xe5, 0xb8, 0xb2, 0x2e, + 0x4c, 0x3d, 0x4c, 0x90, 0xc2, 0x9a, 0x92, 0xb5, 0x51, 0xe2, 0x36, 0xa9, 0x0f, 0x1c, 0x0a, 0xe4, + 0x01, 0xbb, 0xe5, 0xdb, 0xb2, 0x22, 0xc9, 0xc7, 0x01, 0x7b, 0xd3, 0xb7, 0x09, 0x16, 0x18, 0xf3, + 0x5d, 0x03, 0x4a, 0x52, 0xd2, 0xa6, 0xd5, 0x63, 0x04, 0x5d, 0x89, 0x56, 0x21, 0x8f, 0xfb, 0x92, + 0x3e, 0x63, 0x3b, 0x1b, 0xd4, 0x8a, 0x82, 0x4c, 0x14, 0x33, 0x63, 0x66, 0x49, 0x99, 0x73, 0xf6, + 0xe8, 0x51, 0xc8, 0x8b, 0x0b, 0xa4, 0x36, 0x33, 0x1e, 0x16, 0x72, 0x20, 0x96, 0x38, 0xf3, 0xe3, + 0x0c, 0x54, 0x12, 0x8b, 0x4b, 0x51, 0x17, 0x44, 0x2d, 0xd4, 0x4c, 0x8a, 0xb6, 0xfc, 0xe4, 0xd1, + 0xbc, 0x7a, 0xbe, 0x66, 0x1f, 0xe4, 0xf9, 0xfa, 0x36, 0xcc, 0xb6, 0xf8, 0x1e, 0x85, 0xff, 0xf4, + 0xb8, 0x32, 0xcd, 0x71, 0x8a, 0xdd, 0x8d, 0xbd, 0x51, 0x7c, 0x32, 0xac, 0x04, 0xa2, 0x9b, 0xb0, + 0x44, 0x49, 0x40, 0xfb, 0x1b, 0x87, 0x01, 0xa1, 0x7a, 0x33, 0x21, 0x1f, 0x67, 0xdf, 0x78, 0x98, + 0x00, 0x8f, 0xf2, 0x98, 
0x07, 0x50, 0xbe, 0x6d, 0x1d, 0xb8, 0xd1, 0x78, 0x0c, 0x43, 0xc5, 0xf1, + 0x5a, 0x6e, 0xcf, 0x26, 0x32, 0xa0, 0x87, 0xd1, 0x2b, 0xbc, 0xb4, 0xdb, 0x3a, 0xf2, 0x6c, 0x50, + 0xbb, 0x90, 0x00, 0xc8, 0x79, 0x10, 0x4e, 0x8a, 0x30, 0x5d, 0xc8, 0x7d, 0x86, 0x95, 0xe4, 0x77, + 0xa0, 0x18, 0xe7, 0xfa, 0x0f, 0x59, 0xa5, 0xf9, 0x06, 0x14, 0xb8, 0xc7, 0x87, 0x35, 0xea, 0x39, + 0x59, 0x52, 0x32, 0xf7, 0xca, 0xa4, 0xc9, 0xbd, 0xc4, 0x90, 0xf5, 0x4e, 0xd7, 0x7e, 0xc0, 0x21, + 0x6b, 0xe6, 0x41, 0x5e, 0xbe, 0xec, 0x94, 0x2f, 0xdf, 0x55, 0x90, 0x7f, 0x44, 0xe1, 0x8f, 0x8c, + 0x4c, 0x20, 0xb4, 0x47, 0x46, 0x7f, 0xff, 0xb5, 0x09, 0xc3, 0x8f, 0x0d, 0x00, 0xd1, 0xca, 0x13, + 0x6d, 0xa4, 0x14, 0xe3, 0xfc, 0x3b, 0x30, 0xeb, 0x4b, 0x8f, 0x94, 0x83, 0xd6, 0x29, 0xfb, 0xc5, + 0xd1, 0x45, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xc6, 0xcb, 0x1f, 0x7c, 0xb2, 0x3a, 0xf3, 0xe1, 0x27, + 0xab, 0x33, 0x1f, 0x7d, 0xb2, 0x3a, 0xf3, 0xf6, 0xe9, 0xaa, 0xf1, 0xc1, 0xe9, 0xaa, 0xf1, 0xe1, + 0xe9, 0xaa, 0xf1, 0xd1, 0xe9, 0xaa, 0xf1, 0xf1, 0xe9, 0xaa, 0xf1, 0xee, 0xdf, 0x57, 0x67, 0x5e, + 0x7b, 0x2c, 0xcd, 0x1f, 0xfc, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x28, 0x27, 0x65, 0xab, 0x20, + 0x28, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -2503,6 +2505,16 @@ func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SendInitialEvents != nil { + i-- + if *m.SendInitialEvents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } i -= len(m.ResourceVersionMatch) copy(dAtA[i:], m.ResourceVersionMatch) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersionMatch))) @@ -3908,6 +3920,9 @@ func (m *ListOptions) Size() (n int) { n += 2 l = len(m.ResourceVersionMatch) n += 1 + l + sovGenerated(uint64(l)) + if m.SendInitialEvents != nil { + n += 2 + } return n } @@ -4517,6 +4532,7 @@ func (this *ListOptions) String() string { `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, `ResourceVersionMatch:` + fmt.Sprintf("%v", this.ResourceVersionMatch) + `,`, + `SendInitialEvents:` + valueToStringGenerated(this.SendInitialEvents) + `,`, `}`, }, "") return s @@ -8250,6 +8266,27 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } m.ResourceVersionMatch = ResourceVersionMatch(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendInitialEvents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SendInitialEvents = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 2be188a6a8..48955dca85 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -246,19 +246,16 @@ message CreateOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. 
Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -575,6 +572,32 @@ message ListOptions { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. optional string continue = 8; + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + // +optional + optional bool sendInitialEvents = 11; } // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource @@ -645,7 +668,7 @@ message ObjectMeta { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional optional string name = 1; @@ -671,7 +694,7 @@ message ObjectMeta { // // Must be a DNS_LABEL. // Cannot be updated. 
- // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional optional string namespace = 3; @@ -685,7 +708,7 @@ message ObjectMeta { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 5; @@ -749,14 +772,14 @@ message ObjectMeta { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional map labels = 11; // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional map annotations = 12; @@ -811,11 +834,11 @@ message OwnerReference { optional string kind = 1; // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names optional string name = 3; // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids optional string uid = 4; // If true, this reference points to the managing controller. @@ -889,19 +912,16 @@ message PatchOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -1024,7 +1044,7 @@ message StatusDetails { // UID of the resource. // (when there is a single resource which can be described). 
- // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 6; @@ -1128,19 +1148,16 @@ message UpdateOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 152f99296c..352d58ebc2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -114,7 +114,7 @@ type ObjectMeta struct { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -140,7 +140,7 @@ type ObjectMeta struct { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` @@ -154,7 +154,7 @@ type ObjectMeta struct { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` @@ -218,14 +218,14 @@ type ObjectMeta struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. 
- // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` @@ -295,10 +295,10 @@ type OwnerReference struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // If true, this reference points to the managing controller. // +optional @@ -400,6 +400,32 @@ type ListOptions struct { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. 
+ // +optional + SendInitialEvents *bool `json:"sendInitialEvents,omitempty" protobuf:"varint,11,opt,name=sendInitialEvents"` } // resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch @@ -542,19 +568,16 @@ type CreateOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -597,19 +620,16 @@ type PatchOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -674,19 +694,16 @@ type UpdateOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. 
This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -761,7 +778,7 @@ type StatusDetails struct { Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. // (when there is a single resource which can be described). - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // The Causes array includes more details associated with the StatusReason diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 9570726a0d..b736e83712 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_APIGroup = map[string]string{ @@ -115,7 +115,7 @@ var map_CreateOptions = map[string]string{ "": "CreateOptions may be provided when creating an API object.", "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. 
The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -216,6 +216,7 @@ var map_ListOptions = map[string]string{ "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "sendInitialEvents": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -239,18 +240,18 @@ func (ManagedFieldsEntry) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", "selfLink": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", - "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. 
This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", @@ -264,8 +265,8 @@ var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "controller": "If true, this reference points to the managing controller.", "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", } @@ -306,7 +307,7 @@ var map_PatchOptions = map[string]string{ "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. 
Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (PatchOptions) SwaggerDoc() map[string]string { @@ -372,7 +373,7 @@ var map_StatusDetails = map[string]string{ "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "uid": "UID of the resource. (when there is a single resource which can be described). More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", } @@ -451,7 +452,7 @@ var map_UpdateOptions = map[string]string{ "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", "dryRun": "When present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go index b7590f0b31..afe01ed5a4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -426,6 +426,13 @@ func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, } else { out.Continue = "" } + if values, ok := map[string][]string(*in)["sendInitialEvents"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.SendInitialEvents, s); err != nil { + return err + } + } else { + out.SendInitialEvents = nil + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 418e6099f4..7d29c504ab 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -602,6 +602,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index ef7e7c1e90..dff735dcf3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PartialObjectMetadataList = map[string]string{ diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 8360d842b6..19d823cef7 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -77,6 +77,8 @@ func (ls Set) AsValidatedSelector() (Selector, error) { // perform any validation. // According to our measurements this is significantly faster // in codepaths that matter at high scale. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func (ls Set) AsSelectorPreValidated() Selector { return SelectorFromValidatedSet(ls) } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 8910043890..5e60142405 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -149,14 +149,12 @@ type Requirement struct { // NewRequirement is the constructor for a Requirement. // If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. 
-// (2) If the operator is In or NotIn, the values set must be non-empty. -// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. -// (4) If the operator is Exists or DoesNotExist, the value set must be empty. -// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. -// (6) The key is invalid due to its length, or sequence -// -// of characters. See validateLabelKey for more details. +// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. +// 2. If the operator is In or NotIn, the values set must be non-empty. +// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// 4. If the operator is Exists or DoesNotExist, the value set must be empty. +// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// 6. The key is invalid due to its length, or sequence of characters. See validateLabelKey for more details. // // The empty string is a valid value in the input values set. // Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList @@ -213,22 +211,15 @@ func (r *Requirement) hasValue(value string) bool { // Matches returns true if the Requirement matches the input Labels. // There is a match in the following cases: -// (1) The operator is Exists and Labels has the Requirement's key. -// (2) The operator is In, Labels has the Requirement's key and Labels' -// -// value for that key is in Requirement's value set. -// -// (3) The operator is NotIn, Labels has the Requirement's key and -// -// Labels' value for that key is not in Requirement's value set. -// -// (4) The operator is DoesNotExist or NotIn and Labels does not have the -// -// Requirement's key. -// -// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has -// -// the Requirement's key and the corresponding value satisfies mathematical inequality. +// 1. The operator is Exists and Labels has the Requirement's key. +// 2. The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// 3. The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// 4. The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. func (r *Requirement) Matches(ls Labels) bool { switch r.operator { case selection.In, selection.Equals, selection.DoubleEquals: @@ -872,15 +863,14 @@ func (p *Parser) parseExactValue() (sets.String, error) { // "x in (foo,,baz),y,z notin ()" // // Note: -// -// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the -// VALUEs in its requirement -// (2) Exclusion - " notin " - denotes that the KEY is not equal to any -// of the VALUEs in its requirement or does not exist -// (3) The empty string is a valid VALUE -// (4) A requirement with just a KEY - as in "y" above - denotes that -// the KEY exists and can be any VALUE. -// (5) A requirement with just !KEY requires that the KEY not exist. +// 1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// 2. 
Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// 3. The empty string is a valid VALUE +// 4. A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// 5. A requirement with just !KEY requires that the KEY not exist. func Parse(selector string, opts ...field.PathOption) (Selector, error) { parsedSelector, err := parse(selector, field.ToPath(opts...)) if err == nil { @@ -948,6 +938,8 @@ func ValidatedSelectorFromSet(ls Set) (Selector, error) { // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. // A nil and empty Sets are considered equivalent to Everything(). // It assumes that Set is already validated and doesn't do any validation. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} @@ -969,3 +961,76 @@ func SelectorFromValidatedSet(ls Set) Selector { func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) { return parse(selector, field.ToPath(opts...)) } + +// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. Unlike +// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying +// Set. As a result, it is substantially more efficient. A nil and empty Sets are considered +// equivalent to Everything(). +// +// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. If these +// constraints are not met, Set.AsValidatedSelector should be preferred +// +// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to +// the less optimized version. +type ValidatedSetSelector Set + +func (s ValidatedSetSelector) Matches(labels Labels) bool { + for k, v := range s { + if !labels.Has(k) || v != labels.Get(k) { + return false + } + } + return true +} + +func (s ValidatedSetSelector) Empty() bool { + return len(s) == 0 +} + +func (s ValidatedSetSelector) String() string { + keys := make([]string, 0, len(s)) + for k := range s { + keys = append(keys, k) + } + // Ensure deterministic output + sort.Strings(keys) + b := strings.Builder{} + for i, key := range keys { + v := s[key] + b.Grow(len(key) + 2 + len(v)) + if i != 0 { + b.WriteString(",") + } + b.WriteString(key) + b.WriteString("=") + b.WriteString(v) + } + return b.String() +} + +func (s ValidatedSetSelector) Add(r ...Requirement) Selector { + return s.toFullSelector().Add(r...) 
+} + +func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) { + return s.toFullSelector().Requirements() +} + +func (s ValidatedSetSelector) DeepCopySelector() Selector { + res := make(ValidatedSetSelector, len(s)) + for k, v := range s { + res[k] = v + } + return res +} + +func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) { + v, f := s[label] + return v, f +} + +func (s ValidatedSetSelector) toFullSelector() Selector { + return SelectorFromValidatedSet(Set(s)) +} + +var _ Selector = ValidatedSetSelector{} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index b21eb664e3..54ccb7a74c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -191,8 +191,7 @@ func (gv GroupVersion) Identifier() string { // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { for _, gvk := range kinds { if gvk.Group == gv.Group && gvk.Version == gv.Version { @@ -240,8 +239,7 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // GroupVersions can be used to represent a set of desired group versions. // TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. type GroupVersions []GroupVersion // Identifier implements runtime.GroupVersioner interface. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 18b25a994b..a5b116718d 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -118,8 +118,7 @@ func (s *Scheme) Converter() *conversion.Converter { // API group and version that would never be updated. // // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into -// -// every version with particular schemas. Resolve this method at that point. +// every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { s.addObservedVersion(version) s.AddKnownTypes(version, types...) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 21944f2d8f..ff98208420 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -259,8 +259,7 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { // invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). // // TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. -// -// All other callers will be forced to request a Codec directly. 
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
index b21eb664e3..54ccb7a74c 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
@@ -191,8 +191,7 @@ func (gv GroupVersion) Identifier() string {
 // if none of the options match the group. It prefers a match to group and version over just group.
 // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme.
 // TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
-//
-// in fewer places.
+// in fewer places.
 func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) {
 	for _, gvk := range kinds {
 		if gvk.Group == gv.Group && gvk.Version == gv.Version {
@@ -240,8 +239,7 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource {
 // GroupVersions can be used to represent a set of desired group versions.
 // TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme.
 // TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
-//
-// in fewer places.
+// in fewer places.
 type GroupVersions []GroupVersion
 
 // Identifier implements runtime.GroupVersioner interface.
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
index 18b25a994b..a5b116718d 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
@@ -118,8 +118,7 @@ func (s *Scheme) Converter() *conversion.Converter {
 // API group and version that would never be updated.
 //
 // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into
-//
-// every version with particular schemas. Resolve this method at that point.
+// every version with particular schemas. Resolve this method at that point.
 func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) {
 	s.addObservedVersion(version)
 	s.AddKnownTypes(version, types...)
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
index 21944f2d8f..ff98208420 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
@@ -259,8 +259,7 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
 // invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
 //
 // TODO: make this call exist only in pkg/api, and initialize it with the set of default versions.
-//
-// All other callers will be forced to request a Codec directly.
+// All other callers will be forced to request a Codec directly.
 func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec {
 	return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner)
 }
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
index 4466331829..25f955ed75 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
@@ -147,7 +147,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
 	}
 
 	if d, ok := obj.(runtime.NestedObjectDecoder); ok {
-		if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
+		if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{Decoder: c.decoder}); err != nil {
 			if strictErr, ok := runtime.AsStrictDecodingError(err); ok {
 				// save the strictDecodingError let and the caller decide what to do with it
 				strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...)
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
index 3dc9a5a2f2..ce77c7910a 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
@@ -123,7 +123,7 @@ type Unknown struct {
 	// Raw will hold the complete serialized object which couldn't be matched
 	// with a registered type. Most likely, nothing should be done with this
 	// except for passing it through the system.
-	Raw []byte `protobuf:"bytes,2,opt,name=raw"`
+	Raw []byte `json:"-" protobuf:"bytes,2,opt,name=raw"`
 	// ContentEncoding is encoding used to encode 'Raw' data.
 	// Unspecified means no encoding.
 	ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"`
diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
index b19750f3a0..db18ce1ce2 100644
--- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
+++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
@@ -37,3 +37,14 @@ const (
 func (n NamespacedName) String() string {
 	return n.Namespace + string(Separator) + n.Name
 }
+
+// MarshalLog emits a struct containing required key/value pair
+func (n NamespacedName) MarshalLog() interface{} {
+	return struct {
+		Name      string `json:"name"`
+		Namespace string `json:"namespace,omitempty"`
+	}{
+		Name:      n.Name,
+		Namespace: n.Namespace,
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
index 1f5a04fd41..1b60d145c6 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
@@ -214,7 +214,7 @@ func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate {
 	return NewAggregate(result)
 }
 
-// Reduce will return err or, if err is an Aggregate and only has one item,
+// Reduce will return err or nil, if err is an Aggregate and only has one item,
 // the first item in the aggregate.
 func Reduce(err error) error {
 	if agg, ok := err.(Aggregate); ok && err != nil {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
index ca08f8561d..9b3c9c8d5a 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
@@ -32,7 +32,7 @@ func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
 	return &lengthDelimitedFrameWriter{w: w}
 }
 
-// Write writes a single frame to the nested writer, prepending it with the length in
+// Write writes a single frame to the nested writer, prepending it with the length
 // in bytes of data (as a 4 byte, bigendian uint32).
 func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
 	binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml
new file mode 100644
index 0000000000..a667e98342
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml
@@ -0,0 +1,7018 @@
+apiVersion: v1
+kind: Endpoints
+metadata:
+  creationTimestamp: '2016-10-04T17:45:58Z'
+  labels:
+    app: my-app
+  name: app-server
+  namespace: default
+  resourceVersion: '184597135'
+  selfLink: /self/link
+  uid: 6826f086-8a5a-11e6-8d09-42010a800005
+subsets:
+- addresses:
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0000
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0001
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0002
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0003
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0004
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0005
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0006
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0007
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0008
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0009
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0010
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0011
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0012
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0013 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0014 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0015 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0016 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0017 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0018 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0019 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0020 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0021 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0022 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0023 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0024 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0025 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0026 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0027 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0028 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0029 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0030 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0031 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0032 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0033 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0034 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0035 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0036 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0037 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0038 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0039 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0040 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0041 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0042 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0043 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0044 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0045 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0046 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0047 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0048 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0049 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0050 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0051 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0052 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0053 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0054 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0055 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0056 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0057 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0058 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0059 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0060 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0061 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0062 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0063 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0064 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0065 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0066 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0067 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0068 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0069 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0070 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0071 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0072 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0073 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0074 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0075 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0076 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0077 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0078 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0079 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0080 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0081 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0082 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0083 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0084 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0085 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0086 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0087 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0088 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0089 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0090 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0091 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0092 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0093 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0094 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0095 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0096 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0097 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0098 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0099 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0100 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0101 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0102 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0103 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0104 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0105 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0106 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0107 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0108 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0109 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0110 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0111 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0112 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0113 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0114 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0115 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0116 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0117 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0118 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0119 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0120 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0121 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0122 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0123 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0124 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0125 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0126 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0127 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0128 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0129 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0130 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0131 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0132 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0133 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0134 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0135 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0136 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0137 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0138 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0139 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0140 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0141 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0142 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0143 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0144 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0145 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0146 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0147 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0148 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0149 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0150 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0151 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0152 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0153 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0154 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0155 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0156 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0157 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0158 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0159 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0160 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0161 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0162 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0163 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0164 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0165 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0166 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0167 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0168 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0169 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0170 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0171 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0172 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0173 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0174 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0175 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0176 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0177 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0178 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0179 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0180 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0181 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0182 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0183 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0184 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0185 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0186 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0187 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0188 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0189 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0190 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0191 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0192 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0193 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0194 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0195 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0196 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0197 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0198 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0199 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0200 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0201 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0202 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0203 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0204 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0205 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0206 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0207 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0208 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0209 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0210 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0211 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0212 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0213 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0214 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0215 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0216 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0217 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0218 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0219 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0220 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0221 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0222 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0223 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0224 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0225 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0226 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0227 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0228 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0229 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0230 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0231 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0232 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0233 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0234 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0235 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0236 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0237 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0238 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0239 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0240 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0241 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0242 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0243 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0244 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0245 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0246 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0247 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0248 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0249 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0250 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0251 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0252 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0253 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0254 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0255 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0256 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0257 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0258 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0259 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0260 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0261 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0262 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0263 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0264 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0265 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0266 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0267 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0268 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0269 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0270 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0271 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0272 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0273 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0274 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0275 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0276 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0277 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0278 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0279 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0280 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0281 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0282 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0283 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0284 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0285 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0286 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0287 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0288 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0289 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
[Diff hunk continues with one identical address entry per pod, from pod-name-1234-0290 through pod-name-1234-0737; every entry repeats ip: 10.0.0.1, targetRef kind: Pod, namespace: default, resourceVersion: '1234567890', and uid: 11111111-2222-3333-4444-555555555555.]
kind: Pod + name: pod-name-1234-0738 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0739 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0740 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0741 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0742 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0743 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0744 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0745 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0746 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0747 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0748 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0749 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0750 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0751 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0752 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0753 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0754 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0755 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0756 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0757 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0758 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0759 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0760 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0761 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0762 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0763 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0764 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0765 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0766 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0767 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0768 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0769 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0770 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0771 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0772 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0773 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0774 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0775 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0776 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0777 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0778 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0779 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0780 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0781 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0782 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0783 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0784 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0785 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0786 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0787 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0788 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0789 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0790 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0791 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0792 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0793 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0794 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0795 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0796 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0797 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0798 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0799 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0800 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0801 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0802 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0803 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0804 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0805 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0806 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0807 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0808 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0809 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0810 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0811 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0812 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0813 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0814 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0815 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0816 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0817 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0818 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0819 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0820 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0821 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0822 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0823 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0824 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0825 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0826 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0827 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0828 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0829 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0830 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0831 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0832 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0833 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0834 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0835 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0836 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0837 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0838 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0839 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0840 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0841 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0842 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0843 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0844 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0845 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0846 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0847 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0848 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0849 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0850 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0851 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0852 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0853 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0854 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0855 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0856 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0857 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0858 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0859 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0860 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0861 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0862 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0863 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0864 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0865 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0866 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0867 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0868 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0869 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0870 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0871 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0872 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0873 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0874 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0875 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0876 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0877 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0878 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0879 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0880 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0881 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0882 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0883 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0884 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0885 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0886 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0887 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0888 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0889 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0890 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0891 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0892 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0893 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0894 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0895 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0896 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0897 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0898 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0899 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0900 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0901 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0902 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0903 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0904 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0905 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0906 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0907 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0908 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0909 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0910 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0911 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0912 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0913 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0914 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0915 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0916 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0917 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0918 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0919 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0920 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0921 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0922 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0923 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0924 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0925 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0926 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0927 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0928 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0929 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0930 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0931 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0932 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0933 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0934 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0935 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0936 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0937 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0938 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0939 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0940 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0941 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0942 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0943 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0944 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0945 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0946 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0947 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0948 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0949 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0950 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0951 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0952 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0953 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0954 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0955 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0956 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0957 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0958 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0959 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0960 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0961 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0962 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0963 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0964 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0965 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0966 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0967 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0968 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0969 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0970 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0971 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0972 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0973 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0974 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0975 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0976 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0977 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0978 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0979 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0980 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0981 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0982 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0983 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0984 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0985 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0986 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0987 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0988 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0989 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0990 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0991 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0992 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0993 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0994 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0995 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0996 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0997 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0998 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0999 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + ports: + - name: port-name + port: 8080 + protocol: TCP + diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go new file mode 100644 index 0000000000..978ffb3c3e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// FieldManager updates the managed fields and merges applied +// configurations. +type FieldManager = internal.FieldManager + +// NewDefaultFieldManager creates a new FieldManager that merges apply requests +// and update managed fields for other types of requests. +func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) { + f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) + if err != nil { + return nil, fmt.Errorf("failed to create field manager: %v", err) + } + return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil +} + +// NewDefaultCRDFieldManager creates a new FieldManager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. 
+func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) {
+	f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create field manager: %v", err)
+	}
+	return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil
+}
+
+func ValidateManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) error {
+	_, err := internal.DecodeManagedFields(encodedManagedFields)
+	return err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go
new file mode 100644
index 0000000000..b75ef7416e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"sync"
+	"time"
+)
+
+// AtMostEvery will never run the method more than once every specified
+// duration.
+type AtMostEvery struct {
+	delay    time.Duration
+	lastCall time.Time
+	mutex    sync.Mutex
+}
+
+// NewAtMostEvery creates a new AtMostEvery, that will run the method at
+// most every given duration.
+func NewAtMostEvery(delay time.Duration) *AtMostEvery {
+	return &AtMostEvery{
+		delay: delay,
+	}
+}
+
+// updateLastCall returns true if the lastCall time has been updated,
+// false if it was too early.
+func (s *AtMostEvery) updateLastCall() bool {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	if time.Since(s.lastCall) < s.delay {
+		return false
+	}
+	s.lastCall = time.Now()
+	return true
+}
+
+// Do will run the method if enough time has passed, and return true.
+// Otherwise, it does nothing and returns false.
+func (s *AtMostEvery) Do(fn func()) bool {
+	if !s.updateLastCall() {
+		return false
+	}
+	fn()
+	return true
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go
new file mode 100644
index 0000000000..fa342ca135
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
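The `AtMostEvery` helper vendored above rate-limits a callback to at most one run per interval; the field-manager code uses this kind of guard to avoid flooding logs with repeated messages. Because the real type lives in apimachinery's `internal` package and cannot be imported from outside, the following is only a minimal, self-contained sketch of the same pattern (the one-second delay and the printed messages are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// atMostEvery mirrors the vendored AtMostEvery helper: it runs a callback
// at most once per delay window and reports whether the callback ran.
type atMostEvery struct {
	delay    time.Duration
	lastCall time.Time
	mu       sync.Mutex
}

func (s *atMostEvery) do(fn func()) bool {
	s.mu.Lock()
	if time.Since(s.lastCall) < s.delay {
		s.mu.Unlock()
		return false
	}
	s.lastCall = time.Now()
	s.mu.Unlock()
	fn()
	return true
}

func main() {
	limiter := &atMostEvery{delay: time.Second}
	for i := 0; i < 5; i++ {
		ran := limiter.do(func() { fmt.Println("expensive log line") })
		fmt.Printf("call %d executed: %v\n", i, ran)
		time.Sleep(300 * time.Millisecond)
	}
}
```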
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type buildManagerInfoManager struct { + fieldManager Manager + groupVersion schema.GroupVersion + subresource string +} + +var _ Manager = &buildManagerInfoManager{} + +// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier +// combining operation and version for update requests, and just operation for apply requests. +func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion, subresource string) Manager { + return &buildManagerInfoManager{ + fieldManager: f, + groupVersion: gv, + subresource: subresource, + } +} + +// Update implements Manager. +func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *buildManagerInfoManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply) + if err != nil { + return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err) + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) +} + +func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) { + managerInfo := metav1.ManagedFieldsEntry{ + Manager: prefix, + Operation: operation, + APIVersion: f.groupVersion.String(), + Subresource: f.subresource, + } + if managerInfo.Manager == "" { + managerInfo.Manager = "unknown" + } + return BuildManagerIdentifier(&managerInfo) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go new file mode 100644 index 0000000000..8951932ba4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
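The `buildManagerInfoManager` added above wraps another `Manager` and, before delegating, replaces the caller-supplied manager name with a richer identifier that also records the operation, group/version, and subresource. That identity is ultimately a JSON encoding of a `ManagedFieldsEntry` produced by the internal `BuildManagerIdentifier` helper; the sketch below only visualizes which fields go into it, assumes the vendored `k8s.io/apimachinery` module, and uses made-up manager and API-version values:

```go
package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// An update request by a hypothetical controller: the wrapper records the
	// manager name together with the operation, API version, and subresource.
	entry := metav1.ManagedFieldsEntry{
		Manager:     "shipwright-build-controller", // illustrative name
		Operation:   metav1.ManagedFieldsOperationUpdate,
		APIVersion:  "shipwright.io/v1beta1",
		Subresource: "status",
	}
	// JSON is used here only to show the fields that make up the identity;
	// the real encoding is handled by the internal BuildManagerIdentifier.
	b, err := json.Marshal(entry)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```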
+*/ + +package internal + +import ( + "fmt" + "sort" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type capManagersManager struct { + fieldManager Manager + maxUpdateManagers int + oldUpdatesManagerName string +} + +var _ Manager = &capManagersManager{} + +// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates +// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update. +func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager { + return &capManagersManager{ + fieldManager: fieldManager, + maxUpdateManagers: maxUpdateManagers, + oldUpdatesManagerName: "ancient-changes", + } +} + +// Update implements Manager. +func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return object, managed, err + } + if managed, err = f.capUpdateManagers(managed); err != nil { + return nil, nil, fmt.Errorf("failed to cap update managers: %v", err) + } + return object, managed, nil +} + +// Apply implements Manager. +func (f *capManagersManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} + +// capUpdateManagers merges a number of the oldest update entries into versioned buckets, +// such that the number of entries from updates does not exceed f.maxUpdateManagers. +func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) { + // Gather all entries from updates + updaters := []string{} + for manager, fields := range managed.Fields() { + if !fields.Applied() { + updaters = append(updaters, manager) + } + } + if len(updaters) <= f.maxUpdateManagers { + return managed, nil + } + + // If we have more than the maximum, sort the update entries by time, oldest first. + sort.Slice(updaters, func(i, j int) bool { + iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0) + if iTime != nil { + iSeconds = iTime.Unix() + } + if jTime != nil { + jSeconds = jTime.Unix() + } + if iSeconds != jSeconds { + return iSeconds < jSeconds + } + return updaters[i] < updaters[j] + }) + + // Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap + versionToFirstManager := map[string]string{} + for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ { + manager := updaters[i] + vs := managed.Fields()[manager] + time := managed.Times()[manager] + version := string(vs.APIVersion()) + + // Create a new manager identifier for the versioned bucket entry. + // The version for this manager comes from the version of the update being merged into the bucket. + bucket, err := BuildManagerIdentifier(&metav1.ManagedFieldsEntry{ + Manager: f.oldUpdatesManagerName, + Operation: metav1.ManagedFieldsOperationUpdate, + APIVersion: version, + }) + if err != nil { + return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err) + } + + // Merge the fieldets if this is not the first time the version was seen. 
+ // Otherwise just record the manager name in versionToFirstManager + if first, ok := versionToFirstManager[version]; ok { + // If the bucket doesn't exists yet, create one. + if _, ok := managed.Fields()[bucket]; !ok { + s := managed.Fields()[first] + delete(managed.Fields(), first) + managed.Fields()[bucket] = s + } + + managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied()) + delete(managed.Fields(), manager) + length-- + + // Use the time from the update being merged into the bucket, since it is more recent. + managed.Times()[bucket] = time + } else { + versionToFirstManager[version] = manager + } + } + + return managed, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go new file mode 100644 index 0000000000..8c044c9157 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// NewConflictError returns an error including details on the requests apply conflicts +func NewConflictError(conflicts merge.Conflicts) *errors.StatusError { + causes := []metav1.StatusCause{} + for _, conflict := range conflicts { + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseTypeFieldManagerConflict, + Message: fmt.Sprintf("conflict with %v", printManager(conflict.Manager)), + Field: conflict.Path.String(), + }) + } + return errors.NewApplyConflict(causes, getConflictMessage(conflicts)) +} + +func getConflictMessage(conflicts merge.Conflicts) string { + if len(conflicts) == 1 { + return fmt.Sprintf("Apply failed with 1 conflict: conflict with %v: %v", printManager(conflicts[0].Manager), conflicts[0].Path) + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + uniqueManagers := []string{} + for manager := range m { + uniqueManagers = append(uniqueManagers, manager) + } + + // Print conflicts by sorted managers. 
+ sort.Strings(uniqueManagers) + + messages := []string{} + for _, manager := range uniqueManagers { + messages = append(messages, fmt.Sprintf("conflicts with %v:", printManager(manager))) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return fmt.Sprintf("Apply failed with %d conflicts: %s", len(conflicts), strings.Join(messages, "\n")) +} + +func printManager(manager string) string { + encodedManager := &metav1.ManagedFieldsEntry{} + if err := json.Unmarshal([]byte(manager), encodedManager); err != nil { + return fmt.Sprintf("%q", manager) + } + managerStr := fmt.Sprintf("%q", encodedManager.Manager) + if encodedManager.Subresource != "" { + managerStr = fmt.Sprintf("%s with subresource %q", managerStr, encodedManager.Subresource) + } + if encodedManager.Operation == metav1.ManagedFieldsOperationUpdate { + if encodedManager.Time == nil { + return fmt.Sprintf("%s using %v", managerStr, encodedManager.APIVersion) + } + return fmt.Sprintf("%s using %v at %v", managerStr, encodedManager.APIVersion, encodedManager.Time.UTC().Format(time.RFC3339)) + } + return managerStr +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go new file mode 100644 index 0000000000..f3111d4bc7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go @@ -0,0 +1,206 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates +// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum. +// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries. +const DefaultMaxUpdateManagers int = 10 + +// DefaultTrackOnCreateProbability defines the default probability that the field management of an object +// starts being tracked from the object's creation, instead of from the first time the object is applied to. +const DefaultTrackOnCreateProbability float32 = 1 + +var atMostEverySecond = NewAtMostEvery(time.Second) + +// FieldManager updates the managed fields and merges applied +// configurations. +type FieldManager struct { + fieldManager Manager + subresource string +} + +// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields +// on update and apply requests. 
+func NewFieldManager(f Manager, subresource string) *FieldManager { + return &FieldManager{fieldManager: f, subresource: subresource} +} + +// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. +func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager { + return NewFieldManager( + NewLastAppliedUpdater( + NewLastAppliedManager( + NewProbabilisticSkipNonAppliedManager( + NewCapManagersManager( + NewBuildManagerInfoManager( + NewManagedFieldsUpdater( + NewStripMetaManager(f), + ), kind.GroupVersion(), subresource, + ), DefaultMaxUpdateManagers, + ), objectCreater, kind, DefaultTrackOnCreateProbability, + ), typeConverter, objectConverter, kind.GroupVersion()), + ), subresource, + ) +} + +func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) { + liveAccessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, err + } + + // We take the managedFields of the live object in case the request tries to + // manually set managedFields via a subresource. + if ignoreManagedFieldsFromRequestObject { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + + // If the object doesn't have metadata, we should just return without trying to + // set the managedFields at all, so creates/updates/patches will work normally. + newAccessor, err := meta.Accessor(newObj) + if err != nil { + return nil, err + } + + if isResetManagedFields(newAccessor.GetManagedFields()) { + return NewEmptyManaged(), nil + } + + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. + managed, err := DecodeManagedFields(newAccessor.GetManagedFields()) + if err != nil || len(managed.Fields()) == 0 { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + return managed, nil +} + +func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) { + if err != nil { + return NewEmptyManaged(), nil + } + return managed, nil +} + +// Update is used when the object has already been merged (non-apply +// use-case), and simply updates the managed fields in the output +// object. +func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { + // First try to decode the managed fields provided in the update, + // This is necessary to allow directly updating managed fields. + isSubresource := f.subresource != "" + managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource) + if err != nil { + return newObj, nil + } + + RemoveObjectManagedFields(newObj) + + if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} + +// UpdateNoErrors is the same as Update, but it will not return +// errors. If an error happens, the object is returned with +// managedFields cleared. 
+func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { + obj, err := f.Update(liveObj, newObj, manager) + if err != nil { + atMostEverySecond.Do(func() { + ns, name := "unknown", "unknown" + if accessor, err := meta.Accessor(newObj); err == nil { + ns = accessor.GetNamespace() + name = accessor.GetName() + } + + klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind", + newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name) + }) + // Explicitly remove managedFields on failure, so that + // we can't have garbage in it. + RemoveObjectManagedFields(newObj) + return newObj + } + return obj +} + +// Returns true if the managedFields indicate that the user is trying to +// reset the managedFields, i.e. if the list is non-nil but empty, or if +// the list has one empty item. +func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { + if len(managedFields) == 0 { + return managedFields != nil + } + + if len(managedFields) == 1 { + return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) + } + + return false +} + +// Apply is used when server-side apply is called, as it merges the +// object and updates the managed fields. +func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) { + // If the object doesn't have metadata, apply isn't allowed. + accessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, fmt.Errorf("couldn't get accessor: %v", err) + } + + // Decode the managed fields in the live object, since it isn't allowed in the patch. + managed, err := DecodeManagedFields(accessor.GetManagedFields()) + if err != nil { + return nil, fmt.Errorf("failed to decode managed fields: %v", err) + } + + object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + if conflicts, ok := err.(merge.Conflicts); ok { + return nil, NewConflictError(conflicts) + } + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go new file mode 100644 index 0000000000..08186191a7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// EmptyFields represents a set with no paths +// It looks like metav1.Fields{Raw: []byte("{}")} +var EmptyFields = func() metav1.FieldsV1 { + f, err := SetToFields(*fieldpath.NewSet()) + if err != nil { + panic("should never happen") + } + return f +}() + +// FieldsToSet creates a set paths from an input trie of fields +func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) { + err = s.FromJSON(bytes.NewReader(f.Raw)) + return s, err +} + +// SetToFields creates a trie of fields from an input set of paths +func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) { + f.Raw, err = s.ToJSON() + return f, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go new file mode 100644 index 0000000000..b00b6b8298 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go @@ -0,0 +1,50 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" +) + +// LastAppliedConfigAnnotation is the annotation used to store the previous +// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. +// +// This is a copy of the corev1 annotation since we don't want to depend on the whole package. +const LastAppliedConfigAnnotation = "kubectl.kubernetes.io/last-applied-configuration" + +// SetLastApplied sets the last-applied annotation the given value in +// the object. +func SetLastApplied(obj runtime.Object, value string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[LastAppliedConfigAnnotation] = value + if err := apimachineryvalidation.ValidateAnnotationsSize(annotations); err != nil { + delete(annotations, LastAppliedConfigAnnotation) + } + accessor.SetAnnotations(annotations) + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go new file mode 100644 index 0000000000..3f6cf88210 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go @@ -0,0 +1,171 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type lastAppliedManager struct { + fieldManager Manager + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + groupVersion schema.GroupVersion +} + +var _ Manager = &lastAppliedManager{} + +// NewLastAppliedManager converts the client-side apply annotation to +// server-side apply managed fields +func NewLastAppliedManager(fieldManager Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, groupVersion schema.GroupVersion) Manager { + return &lastAppliedManager{ + fieldManager: fieldManager, + typeConverter: typeConverter, + objectConverter: objectConverter, + groupVersion: groupVersion, + } +} + +// Update implements Manager. +func (f *lastAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply will consider the last-applied annotation +// for upgrading an object managed by client-side apply to server-side apply +// without conflicts. +func (f *lastAppliedManager) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newLiveObj, newManaged, newErr := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + // Upgrade the client-side apply annotation only from kubectl server-side-apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ if manager != "kubectl" { + return newLiveObj, newManaged, newErr + } + + // Check if we have conflicts + if newErr == nil { + return newLiveObj, newManaged, newErr + } + conflicts, ok := newErr.(merge.Conflicts) + if !ok { + return newLiveObj, newManaged, newErr + } + conflictSet := conflictsToSet(conflicts) + + // Check if conflicts are allowed due to client-side apply, + // and if so, then force apply + allowedConflictSet, err := f.allowedConflictsFromLastApplied(liveObj) + if err != nil { + return newLiveObj, newManaged, newErr + } + if !conflictSet.Difference(allowedConflictSet).Empty() { + newConflicts := conflictsDifference(conflicts, allowedConflictSet) + return newLiveObj, newManaged, newConflicts + } + + return f.fieldManager.Apply(liveObj, newObj, managed, manager, true) +} + +func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Object) (*fieldpath.Set, error) { + var accessor, err = meta.Accessor(liveObj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // If there is no client-side apply annotation, then there is nothing to do + var annotations = accessor.GetAnnotations() + if annotations == nil { + return nil, fmt.Errorf("no last applied annotation") + } + var lastApplied, ok = annotations[LastAppliedConfigAnnotation] + if !ok || lastApplied == "" { + return nil, fmt.Errorf("no last applied annotation") + } + + liveObjVersioned, err := f.objectConverter.ConvertToVersion(liveObj, f.groupVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to versioned: %v", err) + } + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to typed: %v", err) + } + + var lastAppliedObj = &unstructured.Unstructured{Object: map[string]interface{}{}} + err = json.Unmarshal([]byte(lastApplied), lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to decode last applied obj: %v in '%s'", err, lastApplied) + } + + if lastAppliedObj.GetAPIVersion() != f.groupVersion.String() { + return nil, fmt.Errorf("expected version of last applied to match live object '%s', but got '%s': %v", f.groupVersion.String(), lastAppliedObj.GetAPIVersion(), err) + } + + lastAppliedObjTyped, err := f.typeConverter.ObjectToTyped(lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to convert last applied to typed: %v", err) + } + + lastAppliedObjFieldSet, err := lastAppliedObjTyped.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create fieldset for last applied object: %v", err) + } + + comparison, err := lastAppliedObjTyped.Compare(liveObjTyped) + if err != nil { + return nil, fmt.Errorf("failed to compare last applied object and live object: %v", err) + } + + // Remove fields in last applied that are different, added, or missing in + // the live object. + // Because last-applied fields don't match the live object fields, + // then we don't own these fields. + lastAppliedObjFieldSet = lastAppliedObjFieldSet. + Difference(comparison.Modified). + Difference(comparison.Added). 
+ Difference(comparison.Removed) + + return lastAppliedObjFieldSet, nil +} + +// TODO: replace with merge.Conflicts.ToSet() +func conflictsToSet(conflicts merge.Conflicts) *fieldpath.Set { + conflictSet := fieldpath.NewSet() + for _, conflict := range []merge.Conflict(conflicts) { + conflictSet.Insert(conflict.Path) + } + return conflictSet +} + +func conflictsDifference(conflicts merge.Conflicts, s *fieldpath.Set) merge.Conflicts { + newConflicts := []merge.Conflict{} + for _, conflict := range []merge.Conflict(conflicts) { + if !s.Has(conflict.Path) { + newConflicts = append(newConflicts, conflict) + } + } + return newConflicts +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go new file mode 100644 index 0000000000..06e6c5d8ce --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type lastAppliedUpdater struct { + fieldManager Manager +} + +var _ Manager = &lastAppliedUpdater{} + +// NewLastAppliedUpdater sets the client-side apply annotation up to date with +// server-side apply managed fields +func NewLastAppliedUpdater(fieldManager Manager) Manager { + return &lastAppliedUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *lastAppliedUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// server-side apply managed fields +func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + liveObj, managed, err := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + if err != nil { + return liveObj, managed, err + } + + // Sync the client-side apply annotation only from kubectl server-side apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ // + // If the client-side apply annotation doesn't exist, + // then continue because we have no annotation to update + if manager == "kubectl" && hasLastApplied(liveObj) { + lastAppliedValue, err := buildLastApplied(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err) + } + err = SetLastApplied(liveObj, lastAppliedValue) + if err != nil { + return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err) + } + } + return liveObj, managed, err +} + +func hasLastApplied(obj runtime.Object) bool { + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + return false + } + lastApplied, ok := annotations[LastAppliedConfigAnnotation] + return ok && len(lastApplied) > 0 +} + +func buildLastApplied(obj runtime.Object) (string, error) { + obj = obj.DeepCopyObject() + + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // Remove the annotation from the object before encoding the object + var annotations = accessor.GetAnnotations() + delete(annotations, LastAppliedConfigAnnotation) + accessor.SetAnnotations(annotations) + + lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return "", fmt.Errorf("couldn't encode object into last applied annotation: %v", err) + } + return string(lastApplied), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go new file mode 100644 index 0000000000..9b4c203262 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go @@ -0,0 +1,248 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type ManagedInterface interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +type managedStruct struct { + fields fieldpath.ManagedFields + times map[string]*metav1.Time +} + +var _ ManagedInterface = &managedStruct{} + +// Fields implements ManagedInterface. +func (m *managedStruct) Fields() fieldpath.ManagedFields { + return m.fields +} + +// Times implements ManagedInterface. +func (m *managedStruct) Times() map[string]*metav1.Time { + return m.times +} + +// NewEmptyManaged creates an empty ManagedInterface. 
+func NewEmptyManaged() ManagedInterface { + return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{}) +} + +// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation. +func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface { + return &managedStruct{ + fields: f, + times: t, + } +} + +// RemoveObjectManagedFields removes the ManagedFields from the object +// before we merge so that it doesn't appear in the ManagedFields +// recursively. +func RemoveObjectManagedFields(obj runtime.Object) { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + accessor.SetManagedFields(nil) +} + +// EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields +func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + encodedManagedFields, err := encodeManagedFields(managed) + if err != nil { + return fmt.Errorf("failed to convert back managed fields to API: %v", err) + } + accessor.SetManagedFields(encodedManagedFields) + + return nil +} + +// DecodeManagedFields converts ManagedFields from the wire format (api format) +// to the format used by sigs.k8s.io/structured-merge-diff +func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) { + managed := managedStruct{} + managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) + managed.times = make(map[string]*metav1.Time, len(encodedManagedFields)) + + for i, encodedVersionedSet := range encodedManagedFields { + switch encodedVersionedSet.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + return nil, fmt.Errorf("operation must be `Apply` or `Update`") + } + if len(encodedVersionedSet.APIVersion) < 1 { + return nil, fmt.Errorf("apiVersion must not be empty") + } + switch encodedVersionedSet.FieldsType { + case "FieldsV1": + // Valid case. + case "": + return nil, fmt.Errorf("missing fieldsType in managed fields entry %d", i) + default: + return nil, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i) + } + manager, err := BuildManagerIdentifier(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) + } + managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err) + } + managed.times[manager] = encodedVersionedSet.Time + } + return &managed, nil +} + +// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry +func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) { + encodedManagerCopy := *encodedManager + + // Never include fields type in the manager identifier + encodedManagerCopy.FieldsType = "" + + // Never include the fields in the manager identifier + encodedManagerCopy.FieldsV1 = nil + + // Never include the time in the manager identifier + encodedManagerCopy.Time = nil + + // For appliers, don't include the APIVersion in the manager identifier, + // so it will always have the same manager identifier each time it applied. 
+ if encodedManager.Operation == metav1.ManagedFieldsOperationApply { + encodedManagerCopy.APIVersion = "" + } + + // Use the remaining fields to build the manager identifier + b, err := json.Marshal(&encodedManagerCopy) + if err != nil { + return "", fmt.Errorf("error marshalling manager identifier: %v", err) + } + + return string(b), nil +} + +func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) { + fields := EmptyFields + if encodedVersionedSet.FieldsV1 != nil { + fields = *encodedVersionedSet.FieldsV1 + } + set, err := FieldsToSet(fields) + if err != nil { + return nil, fmt.Errorf("error decoding set: %v", err) + } + return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil +} + +// encodeManagedFields converts ManagedFields from the format used by +// sigs.k8s.io/structured-merge-diff to the wire format (api format) +func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) { + if len(managed.Fields()) == 0 { + return nil, nil + } + encodedManagedFields = []metav1.ManagedFieldsEntry{} + for manager := range managed.Fields() { + versionedSet := managed.Fields()[manager] + v, err := encodeManagerVersionedSet(manager, versionedSet) + if err != nil { + return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err) + } + if t, ok := managed.Times()[manager]; ok { + v.Time = t + } + encodedManagedFields = append(encodedManagedFields, *v) + } + return sortEncodedManagedFields(encodedManagedFields) +} + +func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) { + sort.Slice(encodedManagedFields, func(i, j int) bool { + p, q := encodedManagedFields[i], encodedManagedFields[j] + + if p.Operation != q.Operation { + return p.Operation < q.Operation + } + + pSeconds, qSeconds := int64(0), int64(0) + if p.Time != nil { + pSeconds = p.Time.Unix() + } + if q.Time != nil { + qSeconds = q.Time.Unix() + } + if pSeconds != qSeconds { + return pSeconds < qSeconds + } + + if p.Manager != q.Manager { + return p.Manager < q.Manager + } + + if p.APIVersion != q.APIVersion { + return p.APIVersion < q.APIVersion + } + return p.Subresource < q.Subresource + }) + + return encodedManagedFields, nil +} + +func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) { + encodedVersionedSet = &metav1.ManagedFieldsEntry{} + + // Get as many fields as we can from the manager identifier + err = json.Unmarshal([]byte(manager), encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err) + } + + // Get the APIVersion, Operation, and Fields from the VersionedSet + encodedVersionedSet.APIVersion = string(versionedSet.APIVersion()) + if versionedSet.Applied() { + encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply + } + encodedVersionedSet.FieldsType = "FieldsV1" + fields, err := SetToFields(*versionedSet.Set()) + if err != nil { + return nil, fmt.Errorf("error encoding set: %v", err) + } + encodedVersionedSet.FieldsV1 = &fields + + return encodedVersionedSet, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go 
b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go new file mode 100644 index 0000000000..376eed6b20 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go @@ -0,0 +1,82 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type managedFieldsUpdater struct { + fieldManager Manager +} + +var _ Manager = &managedFieldsUpdater{} + +// NewManagedFieldsUpdater is responsible for updating the managedfields +// in the object, updating the time of the operation as necessary. For +// updates, it uses a hard-coded manager to detect if things have +// changed, and swaps back the correct manager after the operation is +// done. +func NewManagedFieldsUpdater(fieldManager Manager) Manager { + return &managedFieldsUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *managedFieldsUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + self := "current-operation" + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, self) + if err != nil { + return object, managed, err + } + + // If the current operation took any fields from anything, it means the object changed, + // so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager + if vs, ok := managed.Fields()[self]; ok { + delete(managed.Fields(), self) + + if previous, ok := managed.Fields()[manager]; ok { + managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied()) + } else { + managed.Fields()[manager] = vs + } + + managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()} + } + + return object, managed, nil +} + +// Apply implements Manager. +func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) + if err != nil { + return object, managed, err + } + if object != nil { + managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()} + } else { + object = liveObj.DeepCopyObject() + RemoveObjectManagedFields(object) + } + return object, managed, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go new file mode 100644 index 0000000000..053936103d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type Managed interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +// Manager updates the managed fields and merges applied configurations. +type Manager interface { + // Update is used when the object has already been merged (non-apply + // use-case), and simply updates the managed fields in the output + // object. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) + + // Apply is used when server-side apply is called, as it merges the + // object and updates the managed fields. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go new file mode 100644 index 0000000000..1954d65d32 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +const ( + // Field indicates that the content of this path element is a field's name + Field = "f" + + // Value indicates that the content of this path element is a field's value + Value = "v" + + // Index indicates that the content of this path element is an index in an array + Index = "i" + + // Key indicates that the content of this path element is a key value map + Key = "k" + + // Separator separates the type of a path element from the contents + Separator = ":" +) + +// NewPathElement parses a serialized path element +func NewPathElement(s string) (fieldpath.PathElement, error) { + split := strings.SplitN(s, Separator, 2) + if len(split) < 2 { + return fieldpath.PathElement{}, fmt.Errorf("missing colon: %v", s) + } + switch split[0] { + case Field: + return fieldpath.PathElement{ + FieldName: &split[1], + }, nil + case Value: + val, err := value.FromJSON([]byte(split[1])) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Value: &val, + }, nil + case Index: + i, err := strconv.Atoi(split[1]) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Index: &i, + }, nil + case Key: + kv := map[string]json.RawMessage{} + err := json.Unmarshal([]byte(split[1]), &kv) + if err != nil { + return fieldpath.PathElement{}, err + } + fields := value.FieldList{} + for k, v := range kv { + b, err := json.Marshal(v) + if err != nil { + return fieldpath.PathElement{}, err + } + val, err := value.FromJSON(b) + if err != nil { + return fieldpath.PathElement{}, err + } + + fields = append(fields, value.Field{ + Name: k, + Value: val, + }) + } + return fieldpath.PathElement{ + Key: &fields, + }, nil + default: + // Ignore unknown key types + return fieldpath.PathElement{}, nil + } +} + +// PathElementString serializes a path element +func PathElementString(pe fieldpath.PathElement) (string, error) { + switch { + case pe.FieldName != nil: + return Field + Separator + *pe.FieldName, nil + case pe.Key != nil: + kv := map[string]json.RawMessage{} + for _, k := range *pe.Key { + b, err := value.ToJSON(k.Value) + if err != nil { + return "", err + } + m := json.RawMessage{} + err = json.Unmarshal(b, &m) + if err != nil { + return "", err + } + kv[k.Name] = m + } + b, err := json.Marshal(kv) + if err != nil { + return "", err + } + return Key + ":" + string(b), nil + case pe.Value != nil: + b, err := value.ToJSON(*pe.Value) + if err != nil { + return "", err + } + return Value + ":" + string(b), nil + case pe.Index != nil: + return Index + ":" + strconv.Itoa(*pe.Index), nil + default: + return "", errors.New("Invalid type of path element") + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go new file mode 100644 index 0000000000..6b281ec1e5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + "math/rand" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type skipNonAppliedManager struct { + fieldManager Manager + objectCreater runtime.ObjectCreater + gvk schema.GroupVersionKind + beforeApplyManagerName string + probability float32 +} + +var _ Manager = &skipNonAppliedManager{} + +// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply. +func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind) Manager { + return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, gvk, 0.0) +} + +// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply, +// or starts tracking on create with p probability. +func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind, p float32) Manager { + return &skipNonAppliedManager{ + fieldManager: fieldManager, + objectCreater: objectCreater, + gvk: gvk, + beforeApplyManagerName: "before-first-apply", + probability: p, + } +} + +// Update implements Manager. +func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + accessor, err := meta.Accessor(liveObj) + if err != nil { + return newObj, managed, nil + } + + // If managed fields is empty, we need to determine whether to skip tracking managed fields. + if len(managed.Fields()) == 0 { + // Check if the operation is a create, by checking whether lastObj's UID is empty. + // If the operation is create, P(tracking managed fields) = f.probability + // If the operation is update, skip tracking managed fields, since we already know managed fields is empty. + if len(accessor.GetUID()) == 0 { + if f.probability <= rand.Float32() { + return newObj, managed, nil + } + } else { + return newObj, managed, nil + } + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. 
+func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if len(managed.Fields()) == 0 { + emptyObj, err := f.objectCreater.New(f.gvk) + if err != nil { + return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", f.gvk, err) + } + liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName) + if err != nil { + return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err) + } + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go new file mode 100644 index 0000000000..9b61f3a6f3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type stripMetaManager struct { + fieldManager Manager + + // stripSet is the list of fields that should never be part of a mangedFields. + stripSet *fieldpath.Set +} + +var _ Manager = &stripMetaManager{} + +// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset. +func NewStripMetaManager(fieldManager Manager) Manager { + return &stripMetaManager{ + fieldManager: fieldManager, + stripSet: fieldpath.NewSet( + fieldpath.MakePathOrDie("apiVersion"), + fieldpath.MakePathOrDie("kind"), + fieldpath.MakePathOrDie("metadata"), + fieldpath.MakePathOrDie("metadata", "name"), + fieldpath.MakePathOrDie("metadata", "namespace"), + fieldpath.MakePathOrDie("metadata", "creationTimestamp"), + fieldpath.MakePathOrDie("metadata", "selfLink"), + fieldpath.MakePathOrDie("metadata", "uid"), + fieldpath.MakePathOrDie("metadata", "clusterName"), + fieldpath.MakePathOrDie("metadata", "generation"), + fieldpath.MakePathOrDie("metadata", "managedFields"), + fieldpath.MakePathOrDie("metadata", "resourceVersion"), + ), + } +} + +// Update implements Manager. +func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// Apply implements Manager. 
+func (f *stripMetaManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// stripFields removes a predefined set of paths found in typed from managed +func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) { + vs, ok := managed[manager] + if ok { + if vs == nil { + panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager)) + } + newSet := vs.Set().Difference(f.stripSet) + if newSet.Empty() { + delete(managed, manager) + } else { + managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied()) + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go new file mode 100644 index 0000000000..2112c9ab7e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -0,0 +1,186 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type structuredMergeManager struct { + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + objectDefaulter runtime.ObjectDefaulter + groupVersion schema.GroupVersion + hubVersion schema.GroupVersion + updater merge.Updater +} + +var _ Manager = &structuredMergeManager{} + +// NewStructuredMergeManager creates a new Manager that merges apply requests +// and update managed fields for other types of requests. +func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) { + if typeConverter == nil { + return nil, fmt.Errorf("typeconverter must not be nil") + } + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s + IgnoredFields: resetFields, + }, + }, nil +} + +// NewCRDStructuredMergeManager creates a new Manager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. 
+func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) { + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), + IgnoredFields: resetFields, + }, + }, nil +} + +func objectGVKNN(obj runtime.Object) string { + name := "" + namespace := "" + if accessor, err := meta.Accessor(obj); err == nil { + name = accessor.GetName() + namespace = accessor.GetNamespace() + } + + return fmt.Sprintf("%v/%v; %v", namespace, name, obj.GetObjectKind().GroupVersionKind()) +} + +// Update implements Manager. +func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version (%v): %v", objectGVKNN(newObj), f.groupVersion, err) + } + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) + } + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + + // TODO(apelisse) use the first return value when unions are implemented + _, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager) + if err != nil { + return nil, nil, fmt.Errorf("failed to update ManagedFields (%v): %v", objectGVKNN(newObjVersioned), err) + } + managed = NewManaged(managedFields, managed.Times()) + + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + // Check that the patch object has the same version as the live object + if patchVersion := patchObj.GetObjectKind().GroupVersionKind().GroupVersion(); patchVersion != f.groupVersion { + return nil, nil, + errors.NewBadRequest( + fmt.Sprintf("Incorrect version specified in apply patch. 
"+ + "Specified patch version: %s, expected: %s", + patchVersion, f.groupVersion)) + } + + patchObjMeta, err := meta.Accessor(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("couldn't get accessor: %v", err) + } + if patchObjMeta.GetManagedFields() != nil { + return nil, nil, errors.NewBadRequest("metadata.managedFields must be nil") + } + + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + + patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) + } + + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force) + if err != nil { + return nil, nil, err + } + managed = NewManaged(managedFields, managed.Times()) + + if newObjTyped == nil { + return nil, managed, nil + } + + newObj, err := f.typeConverter.TypedToObject(newObjTyped) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new typed object (%v) to object: %v", objectGVKNN(patchObj), err) + } + + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version: %v", objectGVKNN(patchObj), err) + } + f.objectDefaulter.Default(newObjVersioned) + + newObjUnversioned, err := f.toUnversioned(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert to unversioned (%v): %v", objectGVKNN(patchObj), err) + } + return newObjUnversioned, managed, nil +} + +func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.groupVersion) +} + +func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.hubVersion) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go new file mode 100644 index 0000000000..1ac96d7f7b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go @@ -0,0 +1,193 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/schemaconv" + "k8s.io/kube-openapi/pkg/validation/spec" + smdschema "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. +type TypeConverter interface { + ObjectToTyped(runtime.Object) (*typed.TypedValue, error) + TypedToObject(*typed.TypedValue) (runtime.Object, error) +} + +type typeConverter struct { + parser map[schema.GroupVersionKind]*typed.ParseableType +} + +var _ TypeConverter = &typeConverter{} + +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + typeSchema, err := schemaconv.ToSchemaFromOpenAPI(openapiSpec, preserveUnknownFields) + if err != nil { + return nil, fmt.Errorf("failed to convert models to schema: %v", err) + } + + typeParser := typed.Parser{Schema: smdschema.Schema{Types: typeSchema.Types}} + tr := indexModels(&typeParser, openapiSpec) + + return &typeConverter{parser: tr}, nil +} + +func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + t := c.parser[gvk] + if t == nil { + return nil, NewNoCorrespondingTypeError(gvk) + } + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent()) + default: + return t.FromStructured(obj) + } +} + +func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +type deducedTypeConverter struct{} + +// DeducedTypeConverter is a TypeConverter for CRDs that don't have a +// schema. It does implement the same interface though (and create the +// same types of objects), so that everything can still work the same. +// CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return deducedTypeConverter{} +} + +// ObjectToTyped converts an object into a TypedValue with a "deduced type". +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { + switch o := obj.(type) { + case *unstructured.Unstructured: + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + default: + return typed.DeducedParseableType.FromStructured(obj) + } +} + +// TypedToObject transforms the typed value into a runtime.Object. That +// is not specific to deduced type. 
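+//
+// A minimal round-trip sketch (content is an assumed map[string]interface{},
+// not something defined in this package):
+//
+//	tc := NewDeducedTypeConverter()
+//	tv, _ := tc.ObjectToTyped(&unstructured.Unstructured{Object: content})
+//	obj, _ := tc.TypedToObject(tv) // obj is again an *unstructured.Unstructured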
+func (deducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +func valueToObject(val value.Value) (runtime.Object, error) { + vu := val.Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +func indexModels( + typeParser *typed.Parser, + openAPISchemas map[string]*spec.Schema, +) map[schema.GroupVersionKind]*typed.ParseableType { + tr := map[schema.GroupVersionKind]*typed.ParseableType{} + for modelName, model := range openAPISchemas { + gvkList := parseGroupVersionKind(model.Extensions) + if len(gvkList) == 0 { + continue + } + + parsedType := typeParser.Type(modelName) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + tr[schema.GroupVersionKind(gvk)] = &parsedType + } + } + } + return tr +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. +func parseGroupVersionKind(extensions map[string]interface{}) []schema.GroupVersionKind { + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions["x-kubernetes-group-version-kind"] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + var group, version, kind string + + // gvk extension list must be a map with group, version, and + // kind fields + if gvkMap, ok := gvk.(map[interface{}]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + + } else if gvkMap, ok := gvk.(map[string]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + } else { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go new file mode 100644 index 0000000000..45855fa4ca --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// versionConverter is an implementation of +// sigs.k8s.io/structured-merge-diff/merge.Converter +type versionConverter struct { + typeConverter TypeConverter + objectConvertor runtime.ObjectConvertor + hubGetter func(from schema.GroupVersion) schema.GroupVersion +} + +var _ merge.Converter = &versionConverter{} + +// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor. +func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return schema.GroupVersion{ + Group: from.Group, + Version: h.Version, + } + }, + } +} + +// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor. +func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return h + }, + } +} + +// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter +func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) { + // Convert the smd typed value to a kubernetes object. + objectToConvert, err := v.typeConverter.TypedToObject(object) + if err != nil { + return object, err + } + + // Parse the target groupVersion. + groupVersion, err := schema.ParseGroupVersion(string(version)) + if err != nil { + return object, err + } + + // If attempting to convert to the same version as we already have, just return it. + fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion() + if fromVersion == groupVersion { + return object, nil + } + + // Convert to internal + internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion)) + if err != nil { + return object, err + } + + // Convert the object into the target version + convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion) + if err != nil { + return object, err + } + + // Convert the object back to a smd typed value and return it. 
+ return v.typeConverter.ObjectToTyped(convertedObject) +} + +// IsMissingVersionError +func (v *versionConverter) IsMissingVersionError(err error) bool { + return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err) +} + +type noCorrespondingTypeErr struct { + gvk schema.GroupVersionKind +} + +func NewNoCorrespondingTypeError(gvk schema.GroupVersionKind) error { + return &noCorrespondingTypeErr{gvk: gvk} +} + +func (k *noCorrespondingTypeErr) Error() string { + return fmt.Sprintf("no corresponding type for %v", k.gvk) +} + +func isNoCorrespondingTypeError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*noCorrespondingTypeErr) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml new file mode 100644 index 0000000000..66e849f23f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml @@ -0,0 +1,261 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + container.googleapis.com/instance_id: "123456789321654789" + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2019-07-09T16:17:29Z" + labels: + kubernetes.io/arch: amd64 + beta.kubernetes.io/fluentd-ds-ready: "true" + beta.kubernetes.io/instance-type: n1-standard-4 + kubernetes.io/os: linux + cloud.google.com/gke-nodepool: default-pool + cloud.google.com/gke-os-distribution: cos + failure-domain.beta.kubernetes.io/region: us-central1 + failure-domain.beta.kubernetes.io/zone: us-central1-b + topology.kubernetes.io/region: us-central1 + topology.kubernetes.io/zone: us-central1-b + kubernetes.io/hostname: node-default-pool-something + name: node-default-pool-something + resourceVersion: "211582541" + selfLink: /api/v1/nodes/node-default-pool-something + uid: 0c24d0e1-a265-11e9-abe4-42010a80026b +spec: + podCIDR: 10.0.0.1/24 + providerID: some-provider-id-of-some-sort +status: + addresses: + - address: 10.0.0.1 + type: InternalIP + - address: 192.168.0.1 + type: ExternalIP + - address: node-default-pool-something + type: Hostname + allocatable: + cpu: 3920m + ephemeral-storage: "104638878617" + hugepages-2Mi: "0" + memory: 12700100Ki + pods: "110" + capacity: + cpu: "4" + ephemeral-storage: 202086868Ki + hugepages-2Mi: "0" + memory: 15399364Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:08Z" + message: containerd is functioning properly + reason: FrequentContainerdRestart + status: "False" + type: FrequentContainerdRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker overlay2 is functioning properly + reason: CorruptDockerOverlay2 + status: "False" + type: CorruptDockerOverlay2 + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: node is functioning properly + reason: UnregisterNetDevice + status: "False" + type: FrequentUnregisterNetDevice + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: kernel has no deadlock + reason: KernelHasNoDeadlock + status: "False" + type: KernelDeadlock + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: Filesystem is not read-only + reason: FilesystemIsNotReadOnly + status: "False" + type: ReadonlyFilesystem + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:05Z" + 
message: kubelet is functioning properly + reason: FrequentKubeletRestart + status: "False" + type: FrequentKubeletRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker is functioning properly + reason: FrequentDockerRestart + status: "False" + type: FrequentDockerRestart + - lastHeartbeatTime: "2019-07-09T16:17:47Z" + lastTransitionTime: "2019-07-09T16:17:47Z" + message: RouteController created a route + reason: RouteCreated + status: "False" + type: NetworkUnavailable + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient disk space available + reason: KubeletHasSufficientDisk + status: "False" + type: OutOfDisk + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:49Z" + message: kubelet is posting ready status. AppArmor enabled + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8 + - grafana/grafana:4.4.2 + sizeBytes: 287008013 + - names: + - registry.k8s.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b + - registry.k8s.io/node-problem-detector:v0.4.1 + sizeBytes: 286572743 + - names: + - grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033 + - grafana/grafana:4.2.0 + sizeBytes: 277940263 + - names: + - influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc + - influxdb:1.2.2 + sizeBytes: 223948571 + - names: + - gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2 + - gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1 + sizeBytes: 223242132 + - names: + - nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b + - nginx:1.10.1 + sizeBytes: 180708613 + - names: + - registry.k8s.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73 + - registry.k8s.io/fluentd-elasticsearch:v2.0.4 + sizeBytes: 135716379 + - names: + - nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f + - nginx:latest + sizeBytes: 109357355 + - names: + - registry.k8s.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0 + - registry.k8s.io/kubernetes-dashboard-amd64:v1.8.3 + sizeBytes: 102319441 + - names: + - gcr.io/google_containers/kube-proxy:v1.11.10-gke.5 + - registry.k8s.io/kube-proxy:v1.11.10-gke.5 + sizeBytes: 102279340 + - names: + - registry.k8s.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516 + - registry.k8s.io/event-exporter:v0.2.3 + sizeBytes: 94171943 + - names: + - 
registry.k8s.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662 + - registry.k8s.io/prometheus-to-sd:v0.3.1 + sizeBytes: 88077694 + - names: + - registry.k8s.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff + - registry.k8s.io/fluentd-gcp-scaler:0.5.1 + sizeBytes: 86637208 + - names: + - registry.k8s.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521 + - registry.k8s.io/heapster-amd64:v1.6.0-beta.1 + sizeBytes: 76016169 + - names: + - registry.k8s.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52 + - registry.k8s.io/ingress-glbc-amd64:v1.1.1 + sizeBytes: 67801919 + - names: + - registry.k8s.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09 + - registry.k8s.io/kube-addon-manager:v8.7 + sizeBytes: 63322109 + - names: + - nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315 + - nginx:1.10-alpine + sizeBytes: 54042627 + - names: + - registry.k8s.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869 + - registry.k8s.io/cpvpa-amd64:v0.6.0 + sizeBytes: 51785854 + - names: + - registry.k8s.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d + - registry.k8s.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + sizeBytes: 49648481 + - names: + - registry.k8s.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103 + - registry.k8s.io/ip-masq-agent-amd64:v2.1.1 + sizeBytes: 49612505 + - names: + - registry.k8s.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8 + - registry.k8s.io/k8s-dns-kube-dns-amd64:1.14.10 + sizeBytes: 49549457 + - names: + - registry.k8s.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450 + - registry.k8s.io/rescheduler:v0.4.0 + sizeBytes: 48973149 + - names: + - registry.k8s.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc + - registry.k8s.io/event-exporter:v0.2.4 + sizeBytes: 47261019 + - names: + - registry.k8s.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848 + - registry.k8s.io/coredns:1.1.3 + sizeBytes: 45587362 + - names: + - prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74 + - prom/prometheus:v1.1.3 + sizeBytes: 45493941 + nodeInfo: + architecture: amd64 + bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61 + containerRuntimeVersion: docker://17.3.2 + kernelVersion: 4.14.127+ + kubeProxyVersion: v1.11.10-gke.5 + kubeletVersion: v1.11.10-gke.5 + machineID: 1739555e5b231057f0f9a0b5fa29511b + operatingSystem: linux + osImage: Container-Optimized OS from Google + systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B + volumesAttached: + - devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - devicePath: 
/dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + volumesInUse: + - kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml new file mode 100644 index 0000000000..3fb0877d67 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: some-app + plugin1: some-value + plugin2: some-value + plugin3: some-value + plugin4: some-value + name: some-name + namespace: default + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: some-name + uid: 0a9d2b9e-779e-11e7-b422-42010a8001be +spec: + containers: + - args: + - one + - two + - three + - four + - five + - six + - seven + - eight + - nine + env: + - name: VAR_3 + valueFrom: + secretKeyRef: + key: some-other-key + name: some-oher-name + - name: VAR_2 + valueFrom: + secretKeyRef: + key: other-key + name: other-name + - name: VAR_1 + valueFrom: + secretKeyRef: + key: some-key + name: some-name + image: some-image-name + imagePullPolicy: IfNotPresent + name: some-name + resources: + requests: + cpu: '0' + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-hu5jz + readOnly: true + dnsPolicy: ClusterFirst + nodeName: node-name + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: default-token-hu5jz + secret: + defaultMode: 420 + secretName: default-token-hu5jz +status: + conditions: + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: Initialized + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:41:59Z' + status: 'True' + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: 'True' + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: PodScheduled + containerStatuses: + - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376 + image: some-image-name + imageID: 
docker-pullable://some-image-id + lastState: + terminated: + containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565 + exitCode: 255 + finishedAt: '2019-07-08T09:39:09Z' + reason: Error + startedAt: '2019-07-08T09:38:54Z' + name: name + ready: true + restartCount: 6 + state: + running: + startedAt: '2019-07-08T09:41:59Z' + hostIP: 10.0.0.1 + phase: Running + podIP: 10.0.0.1 + qosClass: BestEffort + startTime: '2019-07-08T09:31:18Z' diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go new file mode 100644 index 0000000000..48b774cece --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go @@ -0,0 +1,174 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +var ( + scaleGroupVersion = schema.GroupVersion{Group: "autoscaling", Version: "v1"} + replicasPathInScale = fieldpath.MakePathOrDie("spec", "replicas") +) + +// ResourcePathMappings maps a group/version to its replicas path. The +// assumption is that all the paths correspond to leaf fields. +type ResourcePathMappings map[string]fieldpath.Path + +// ScaleHandler manages the conversion of managed fields between a main +// resource and the scale subresource +type ScaleHandler struct { + parentEntries []metav1.ManagedFieldsEntry + groupVersion schema.GroupVersion + mappings ResourcePathMappings +} + +// NewScaleHandler creates a new ScaleHandler +func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler { + return &ScaleHandler{ + parentEntries: parentEntries, + groupVersion: groupVersion, + mappings: mappings, + } +} + +// ToSubresource filter the managed fields of the main resource and convert +// them so that they can be handled by scale. +// For the managed fields that have a replicas path it performs two changes: +// 1. APIVersion is changed to the APIVersion of the scale subresource +// 2. 
Replicas path of the main resource is transformed to the replicas path of +// the scale subresource +func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) { + managed, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + for manager, versionedSet := range managed.Fields() { + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Skip the entry if the APIVersion is unknown + if !ok || path == nil { + continue + } + + if versionedSet.Set().Has(path) { + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(replicasPathInScale), + fieldpath.APIVersion(scaleGroupVersion.String()), + versionedSet.Applied(), + ) + + f[manager] = newVersionedSet + t[manager] = managed.Times()[manager] + } + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +// ToParent merges `scaleEntries` with the entries of the main resource and +// transforms them accordingly +func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) { + decodedParentEntries, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + parentFields := decodedParentEntries.Fields() + + decodedScaleEntries, err := internal.DecodeManagedFields(scaleEntries) + if err != nil { + return nil, err + } + scaleFields := decodedScaleEntries.Fields() + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + + for manager, versionedSet := range parentFields { + // Get the main resource "replicas" path + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Drop the entry if the APIVersion is unknown. + if !ok { + continue + } + + // If the parent entry does not have the replicas path or it is nil, just + // keep it as it is. The path is nil for Custom Resources without scale + // subresource. + if path == nil || !versionedSet.Set().Has(path) { + f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + continue + } + + if _, ok := scaleFields[manager]; !ok { + // "Steal" the replicas path from the main resource entry + newSet := versionedSet.Set().Difference(fieldpath.NewSet(path)) + + if !newSet.Empty() { + newVersionedSet := fieldpath.NewVersionedSet( + newSet, + versionedSet.APIVersion(), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + } else { + // Field wasn't stolen, let's keep the entry as it is. 
+ f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + delete(scaleFields, manager) + } + } + + for manager, versionedSet := range scaleFields { + if !versionedSet.Set().Has(replicasPathInScale) { + continue + } + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(h.mappings[h.groupVersion.String()]), + fieldpath.APIVersion(h.groupVersion.String()), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +func managedFieldsEntries(entries internal.ManagedInterface) ([]metav1.ManagedFieldsEntry, error) { + obj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := internal.EncodeObjectManagedFields(obj, entries); err != nil { + return nil, err + } + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + return accessor.GetManagedFields(), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go new file mode 100644 index 0000000000..d031eefaa3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. +type TypeConverter = internal.TypeConverter + +// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't +// have a schema. It does implement the same interface though (and +// create the same types of objects), so that everything can still work +// the same. CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return internal.NewDeducedTypeConverter() +} + +// NewTypeConverter builds a TypeConverter from a map of OpenAPIV3 schemas. +// This will automatically find the proper version of the object, and the +// corresponding schema information. +// The keys to the map must be consistent with the names +// used by Refs within the schemas. 
+// The schemas should conform to the Kubernetes Structural Schema OpenAPI +// restrictions found in docs: +// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + return internal.NewTypeConverter(openapiSpec, preserveUnknownFields) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index e396275682..a20efd1871 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -88,8 +88,7 @@ func toYAML(v interface{}) (string, error) { // supports JSON merge patch semantics. // // NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. -// -// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). func HasConflicts(left, right interface{}) (bool, error) { switch typedLeft := left.(type) { case map[string]interface{}: diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index d738725caf..3674914f70 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. func (r *rudimentaryErrorBackoff) OnError(error) { + now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() - defer r.lastErrorTimeLock.Unlock() - d := time.Since(r.lastErrorTime) - if d < r.minPeriod { - // If the time moves backwards for any reason, do nothing - time.Sleep(r.minPeriod - d) - } + d := now.Sub(r.lastErrorTime) r.lastErrorTime = time.Now() + r.lastErrorTimeLock.Unlock() + + // Do not sleep with the lock held because that causes all callers of HandleError to block. + // We only want the current goroutine to block. + // A negative or zero duration causes time.Sleep to return immediately. + // If the time moves backwards for any reason, do nothing. + time.Sleep(r.minPeriod - d) } // GetCaller returns the caller of the function that calls it. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go index 99c292feda..d50526f426 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go @@ -64,6 +64,20 @@ func (s Set[T]) Delete(items ...T) Set[T] { return s } +// Clear empties the set. +// It is preferable to replace the set with a newly constructed set, +// but not all callers can do that (when there are other references to the map). +// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN +// can't be cleared because NaN can't be removed. +// For sets containing items of a type that is reflexive for ==, +// this is optimized to a single call to runtime.mapclear(). +func (s Set[T]) Clear() Set[T] { + for key := range s { + delete(s, key) + } + return s +} + // Has returns true if and only if item is contained in the set. 
func (s Set[T]) Has(item T) bool { _, contained := s[item] diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS index 4443bafd13..73244449f2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -1,6 +1,7 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: + - apelisse - pwittrock reviewers: - apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 6fb3697321..3ee683b997 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1106,7 +1106,7 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me // Then, sort them by the relative order in setElementOrder, patch list and live list. // The precedence is $setElementOrder > order in patch list > order in live list. // This function will delete the item after merging it to prevent process it again in the future. -// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md +// Ref: https://git.k8s.io/design-proposals-archive/cli/preserve-order-in-strategic-merge-patch.md func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { for key, patchV := range patch { // Do nothing if there is no ordering directive diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index e767092dd8..0b8a6cb354 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -191,7 +191,13 @@ func IsDNS1123Label(value string) []string { errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) } if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + if dns1123SubdomainRegexp.MatchString(value) { + // It was a valid subdomain and not a valid label. Since we + // already checked length, it must be dots. + errs = append(errs, "must not contain dots") + } else { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } } return errs } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go new file mode 100644 index 0000000000..4187619256 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go @@ -0,0 +1,502 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "math" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" +) + +// Backoff holds parameters applied to a Backoff function. 
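+//
+// As a hedged illustration (the numbers are assumptions, not recommendations),
+// a caller wanting roughly 100ms, 200ms, 400ms, 800ms waits, each jittered by
+// up to 10% and never growing past one second, could use
+//
+//	Backoff{
+//		Duration: 100 * time.Millisecond,
+//		Factor:   2.0,
+//		Jitter:   0.1,
+//		Steps:    4,
+//		Cap:      time.Second,
+//	}
+//
+// and draw successive delays from Step() or DelayFunc().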
+type Backoff struct { + // The initial duration. + Duration time.Duration + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. + Factor float64 + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. + Jitter float64 + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. + Steps int + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. + Cap time.Duration +} + +// Step returns an amount of time to sleep determined by the original +// Duration and Jitter. The backoff is mutated to update its Steps and +// Duration. A nil Backoff always has a zero-duration step. +func (b *Backoff) Step() time.Duration { + if b == nil { + return 0 + } + var nextDuration time.Duration + nextDuration, b.Duration, b.Steps = delay(b.Steps, b.Duration, b.Cap, b.Factor, b.Jitter) + return nextDuration +} + +// DelayFunc returns a function that will compute the next interval to +// wait given the arguments in b. It does not mutate the original backoff +// but the function is safe to use only from a single goroutine. +func (b Backoff) DelayFunc() DelayFunc { + steps := b.Steps + duration := b.Duration + cap := b.Cap + factor := b.Factor + jitter := b.Jitter + + return func() time.Duration { + var nextDuration time.Duration + // jitter is applied per step and is not cumulative over multiple steps + nextDuration, duration, steps = delay(steps, duration, cap, factor, jitter) + return nextDuration + } +} + +// Timer returns a timer implementation appropriate to this backoff's parameters +// for use with wait functions. +func (b Backoff) Timer() Timer { + if b.Steps > 1 || b.Jitter != 0 { + return &variableTimer{new: internalClock.NewTimer, fn: b.DelayFunc()} + } + if b.Duration > 0 { + return &fixedTimer{new: internalClock.NewTicker, interval: b.Duration} + } + return newNoopTimer() +} + +// delay implements the core delay algorithm used in this package. +func delay(steps int, duration, cap time.Duration, factor, jitter float64) (_ time.Duration, next time.Duration, nextSteps int) { + // when steps is non-positive, do not alter the base duration + if steps < 1 { + if jitter > 0 { + return Jitter(duration, jitter), duration, 0 + } + return duration, duration, 0 + } + steps-- + + // calculate the next step's interval + if factor != 0 { + next = time.Duration(float64(duration) * factor) + if cap > 0 && next > cap { + next = cap + steps = 0 + } + } else { + next = duration + } + + // add jitter for this step + if jitter > 0 { + duration = Jitter(duration, jitter) + } + + return duration, next, steps + +} + +// DelayWithReset returns a DelayFunc that will return the appropriate next interval to +// wait. Every resetInterval the backoff parameters are reset to their initial state. +// This method is safe to invoke from multiple goroutines, but all calls will advance +// the backoff state when Factor is set. 
If Factor is zero, this method is the same as +// invoking b.DelayFunc() since Steps has no impact without Factor. If resetInterval is +// zero no backoff will be performed as the same calling DelayFunc with a zero factor +// and steps. +func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) DelayFunc { + if b.Factor <= 0 { + return b.DelayFunc() + } + if resetInterval <= 0 { + b.Steps = 0 + b.Factor = 0 + return b.DelayFunc() + } + return (&backoffManager{ + backoff: b, + initialBackoff: b, + resetInterval: resetInterval, + + clock: c, + lastStart: c.Now(), + timer: nil, + }).Step +} + +// Until loops until stop channel is closed, running f every period. +// +// Until is syntactic sugar on top of JitterUntil with zero jitter factor and +// with sliding = true (which means the timer for period starts after the f +// completes). +func Until(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, true, stopCh) +} + +// UntilWithContext loops until context is done, running f every period. +// +// UntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor and with sliding = true (which means the timer +// for period starts after the f completes). +func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, true) +} + +// NonSlidingUntil loops until stop channel is closed, running f every +// period. +// +// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter +// factor, with sliding = false (meaning the timer for period starts at the same +// time as the function starts). +func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, false, stopCh) +} + +// NonSlidingUntilWithContext loops until context is done, running f every +// period. +// +// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor, with sliding = false (meaning the timer for period +// starts at the same time as the function starts). +func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, false) +} + +// JitterUntil loops until stop channel is closed, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Close stopCh to stop. f may not be invoked if stop channel is already +// closed. Pass NeverStop to if you don't want it stop. +func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { + BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) +} + +// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. 
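+//
+// A minimal usage sketch, mirroring how JitterUntil calls this function
+// (doWork and stopCh are assumed to exist in the caller):
+//
+//	BackoffUntil(doWork, NewJitteredBackoffManager(time.Second, 0.1, &clock.RealClock{}), true, stopCh)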
+func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { + var t clock.Timer + for { + select { + case <-stopCh: + return + default: + } + + if !sliding { + t = backoff.Backoff() + } + + func() { + defer runtime.HandleCrash() + f() + }() + + if sliding { + t = backoff.Backoff() + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and stopCh, and t.C select falls through. + // In order to mitigate we re-check stopCh at the beginning + // of every loop to prevent extra executions of f(). + select { + case <-stopCh: + if !t.Stop() { + <-t.C() + } + return + case <-t.C(): + } + } +} + +// JitterUntilWithContext loops until context is done, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Cancel context to stop. f may not be invoked if context is already expired. +func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { + JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) +} + +// backoffManager provides simple backoff behavior in a threadsafe manner to a caller. +type backoffManager struct { + backoff Backoff + initialBackoff Backoff + resetInterval time.Duration + + clock clock.Clock + + lock sync.Mutex + lastStart time.Time + timer clock.Timer +} + +// Step returns the expected next duration to wait. +func (b *backoffManager) Step() time.Duration { + b.lock.Lock() + defer b.lock.Unlock() + + switch { + case b.resetInterval == 0: + b.backoff = b.initialBackoff + case b.clock.Now().Sub(b.lastStart) > b.resetInterval: + b.backoff = b.initialBackoff + b.lastStart = b.clock.Now() + } + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer +// for exponential backoff. The returned timer must be drained before calling Backoff() the second +// time. +func (b *backoffManager) Backoff() clock.Timer { + b.lock.Lock() + defer b.lock.Unlock() + if b.timer == nil { + b.timer = b.clock.NewTimer(b.Step()) + } else { + b.timer.Reset(b.Step()) + } + return b.timer +} + +// Timer returns a new Timer instance that shares the clock and the reset behavior with all other +// timers. +func (b *backoffManager) Timer() Timer { + return DelayFunc(b.Step).Timer(b.clock) +} + +// BackoffManager manages backoff with a particular scheme based on its underlying implementation. +type BackoffManager interface { + // Backoff returns a shared clock.Timer that is Reset on every invocation. This method is not + // safe for use from multiple threads. It returns a timer for backoff, and caller shall backoff + // until Timer.C() drains. If the second Backoff() is called before the timer from the first + // Backoff() call finishes, the first timer will NOT be drained and result in undetermined + // behavior. + Backoff() clock.Timer +} + +// Deprecated: Will be removed when the legacy polling functions are removed. 
+type exponentialBackoffManagerImpl struct { + backoff *Backoff + backoffTimer clock.Timer + lastBackoffStart time.Time + initialBackoff time.Duration + backoffResetDuration time.Duration + clock clock.Clock +} + +// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and +// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. +// This backoff manager is used to reduce load during upstream unhealthiness. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a +// Backoff struct, use DelayWithReset() to get a DelayFunc that periodically resets itself, and then +// invoke Timer() when calling wait.BackoffUntil. +// +// Instead of: +// +// bm := wait.NewExponentialBackoffManager(init, max, reset, factor, jitter, clock) +// ... +// wait.BackoffUntil(..., bm.Backoff, ...) +// +// Use: +// +// delayFn := wait.Backoff{ +// Duration: init, +// Cap: max, +// Steps: int(math.Ceil(float64(max) / float64(init))), // now a required argument +// Factor: factor, +// Jitter: jitter, +// }.DelayWithReset(reset, clock) +// wait.BackoffUntil(..., delayFn.Timer(), ...) +func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { + return &exponentialBackoffManagerImpl{ + backoff: &Backoff{ + Duration: initBackoff, + Factor: backoffFactor, + Jitter: jitter, + + // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not + // what we ideally need here, we set it to max int and assume we will never use up the steps + Steps: math.MaxInt32, + Cap: maxBackoff, + }, + backoffTimer: nil, + initialBackoff: initBackoff, + lastBackoffStart: c.Now(), + backoffResetDuration: resetDuration, + clock: c, + } +} + +func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { + if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { + b.backoff.Steps = math.MaxInt32 + b.backoff.Duration = b.initialBackoff + } + b.lastBackoffStart = b.clock.Now() + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. +// The returned timer must be drained before calling Backoff() the second time +func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { + if b.backoffTimer == nil { + b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) + } else { + b.backoffTimer.Reset(b.getNextBackoff()) + } + return b.backoffTimer +} + +// Deprecated: Will be removed when the legacy polling functions are removed. +type jitteredBackoffManagerImpl struct { + clock clock.Clock + duration time.Duration + jitter float64 + backoffTimer clock.Timer +} + +// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter +// is negative, backoff will not be jittered. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a +// Backoff struct and invoke Timer() when calling wait.BackoffUntil. +// +// Instead of: +// +// bm := wait.NewJitteredBackoffManager(duration, jitter, clock) +// ... +// wait.BackoffUntil(..., bm.Backoff, ...) +// +// Use: +// +// wait.BackoffUntil(..., wait.Backoff{Duration: duration, Jitter: jitter}.Timer(), ...) 
+func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { + return &jitteredBackoffManagerImpl{ + clock: c, + duration: duration, + jitter: jitter, + backoffTimer: nil, + } +} + +func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { + jitteredPeriod := j.duration + if j.jitter > 0.0 { + jitteredPeriod = Jitter(j.duration, j.jitter) + } + return jitteredPeriod +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. +// The returned timer must be drained before calling Backoff() the second time +func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { + backoff := j.getNextBackoff() + if j.backoffTimer == nil { + j.backoffTimer = j.clock.NewTimer(backoff) + } else { + j.backoffTimer.Reset(backoff) + } + return j.backoffTimer +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. +// +// Since backoffs are often subject to cancellation, we recommend using +// ExponentialBackoffWithContext and passing a context to the method. +func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { + return err + } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) + } + return ErrWaitTimeout +} + +// ExponentialBackoffWithContext repeats a condition check with exponential backoff. +// It immediately returns an error if the condition returns an error, the context is cancelled +// or hits the deadline, or if the maximum attempts defined in backoff is exceeded (ErrWaitTimeout). +// If an error is returned by the condition the backoff stops immediately. The condition will +// never be invoked more than backoff.Steps times. +func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error { + for backoff.Steps > 0 { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if ok, err := runConditionWithCrashProtectionWithContext(ctx, condition); err != nil || ok { + return err + } + + if backoff.Steps == 1 { + break + } + + waitBeforeRetry := backoff.Step() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(waitBeforeRetry): + } + } + + return ErrWaitTimeout +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go new file mode 100644 index 0000000000..1d3dcaa74e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go @@ -0,0 +1,51 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "sync" + "time" + + "k8s.io/utils/clock" +) + +// DelayFunc returns the next time interval to wait. +type DelayFunc func() time.Duration + +// Timer takes an arbitrary delay function and returns a timer that can handle arbitrary interval changes. +// Use Backoff{...}.Timer() for simple delays and more efficient timers. +func (fn DelayFunc) Timer(c clock.Clock) Timer { + return &variableTimer{fn: fn, new: c.NewTimer} +} + +// Until takes an arbitrary delay function and runs until cancelled or the condition indicates exit. This +// offers all of the functionality of the methods in this package. +func (fn DelayFunc) Until(ctx context.Context, immediate, sliding bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, &variableTimer{fn: fn, new: internalClock.NewTimer}, immediate, sliding, condition) +} + +// Concurrent returns a version of this DelayFunc that is safe for use by multiple goroutines that +// wish to share a single delay timer. +func (fn DelayFunc) Concurrent() DelayFunc { + var lock sync.Mutex + return func() time.Duration { + lock.Lock() + defer lock.Unlock() + return fn() + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/error.go b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go new file mode 100644 index 0000000000..dd75801d82 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go @@ -0,0 +1,96 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "errors" +) + +// ErrWaitTimeout is returned when the condition was not satisfied in time. +// +// Deprecated: This type will be made private in favor of Interrupted() +// for checking errors or ErrorInterrupted(err) for returning a wrapped error. +var ErrWaitTimeout = ErrorInterrupted(errors.New("timed out waiting for the condition")) + +// Interrupted returns true if the error indicates a Poll, ExponentialBackoff, or +// Until loop exited for any reason besides the condition returning true or an +// error. A loop is considered interrupted if the calling context is cancelled, +// the context reaches its deadline, or a backoff reaches its maximum allowed +// steps. +// +// Callers should use this method instead of comparing the error value directly to +// ErrWaitTimeout, as methods that cancel a context may not return that error. +// +// Instead of: +// +// err := wait.Poll(...) +// if err == wait.ErrWaitTimeout { +// log.Infof("Wait for operation exceeded") +// } else ... +// +// Use: +// +// err := wait.Poll(...) 
+// if wait.Interrupted(err) { +// log.Infof("Wait for operation exceeded") +// } else ... +func Interrupted(err error) bool { + switch { + case errors.Is(err, errWaitTimeout), + errors.Is(err, context.Canceled), + errors.Is(err, context.DeadlineExceeded): + return true + default: + return false + } +} + +// errInterrupted +type errInterrupted struct { + cause error +} + +// ErrorInterrupted returns an error that indicates the wait was ended +// early for a given reason. If no cause is provided a generic error +// will be used but callers are encouraged to provide a real cause for +// clarity in debugging. +func ErrorInterrupted(cause error) error { + switch cause.(type) { + case errInterrupted: + // no need to wrap twice since errInterrupted is only needed + // once in a chain + return cause + default: + return errInterrupted{cause} + } +} + +// errWaitTimeout is the private version of the previous ErrWaitTimeout +// and is private to prevent direct comparison. Use ErrorInterrupted(err) +// to get an error that will return true for Interrupted(err). +var errWaitTimeout = errInterrupted{} + +func (e errInterrupted) Unwrap() error { return e.cause } +func (e errInterrupted) Is(target error) bool { return target == errWaitTimeout } +func (e errInterrupted) Error() string { + if e.cause == nil { + // returns the same error message as historical behavior + return "timed out waiting for the condition" + } + return e.cause.Error() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go new file mode 100644 index 0000000000..0dd13c626c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -0,0 +1,97 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// loopConditionUntilContext executes the provided condition at intervals defined by +// the provided timer until the provided context is cancelled, the condition returns +// true, or the condition returns an error. If sliding is true, the period is computed +// after condition runs. If it is false then period includes the runtime for condition. +// If immediate is false the first delay happens before any call to condition, if +// immediate is true the condition will be invoked before waiting and guarantees that +// the condition is invoked at least once, regardless of whether the context has been +// cancelled. The returned error is the error returned by the last condition or the +// context error if the context was terminated. +// +// This is the common loop construct for all polling in the wait package. 
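[editor's note] The `Interrupted`/`ErrorInterrupted` helpers above replace direct comparisons against `ErrWaitTimeout`. A hedged sketch of the recommended check; the readiness condition, interval, and timeout are illustrative only:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 30*time.Second, true,
		func(ctx context.Context) (bool, error) {
			// Placeholder readiness check; return true once the resource is ready.
			return false, nil
		})

	switch {
	case err == nil:
		fmt.Println("ready")
	case wait.Interrupted(err):
		// Covers context cancellation, deadline expiry, and exhausted backoff,
		// which a direct comparison against wait.ErrWaitTimeout would miss.
		fmt.Println("gave up waiting:", err)
	default:
		fmt.Println("check failed:", err)
	}

	// A caller can attach its own cause while still satisfying
	// wait.Interrupted for code further up the stack.
	cause := wait.ErrorInterrupted(errors.New("resource never became ready"))
	fmt.Println(wait.Interrupted(cause), cause)
}
```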
+func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding bool, condition ConditionWithContextFunc) error { + defer t.Stop() + + var timeCh <-chan time.Time + doneCh := ctx.Done() + + // if immediate is true the condition is + // guaranteed to be executed at least once, + // if we haven't requested immediate execution, delay once + if immediate { + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + } else { + timeCh = t.C() + select { + case <-doneCh: + return ctx.Err() + case <-timeCh: + } + } + + for { + // checking ctx.Err() is slightly faster than checking a select + if err := ctx.Err(); err != nil { + return err + } + + if !sliding { + t.Next() + } + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + if sliding { + t.Next() + } + + if timeCh == nil { + timeCh = t.C() + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and doneCh, and t.C select falls through. + // In order to mitigate we re-check doneCh at the beginning + // of every loop to guarantee at-most one extra execution + // of condition. + select { + case <-doneCh: + return ctx.Err() + case <-timeCh: + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go new file mode 100644 index 0000000000..32e8688ca0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go @@ -0,0 +1,315 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" +) + +// PollUntilContextCancel tries a condition func until it returns true, an error, or the context +// is cancelled or hits a deadline. condition will be invoked after the first interval if the +// context is not cancelled first. The returned error will be from ctx.Err(), the condition's +// err return value, or nil. If invoking condition takes longer than interval the next condition +// will be invoked immediately. When using very short intervals, condition may be invoked multiple +// times before a context cancellation is detected. If immediate is true, condition will be +// invoked before waiting and guarantees that condition is invoked at least once, regardless of +// whether the context has been cancelled. +func PollUntilContextCancel(ctx context.Context, interval time.Duration, immediate bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// PollUntilContextTimeout will terminate polling after timeout duration by setting a context +// timeout. 
This is provided as a convenience function for callers not currently executing under +// a deadline and is equivalent to: +// +// deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) +// err := PollUntilContextCancel(ctx, interval, immediate, condition) +// +// The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying +// inline usage. All other behavior is identical to PollWithContextTimeout. +func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error { + deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) + defer deadlineCancel() + return loopConditionUntilContext(deadlineCtx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// Poll tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// Poll always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func Poll(interval, timeout time.Duration, condition ConditionFunc) error { + return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollWithContext tries a condition func until it returns true, an error, +// or when the context expires or the timeout is reached, whichever +// happens first. +// +// PollWithContext always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, timeout), condition) +} + +// PollUntil tries a condition func until it returns true, an error or stopCh is +// closed. +// +// PollUntil always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollUntilWithContext always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. 
+// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollInfinite tries a condition func until it returns true or an error +// +// PollInfinite always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollInfinite(interval time.Duration, condition ConditionFunc) error { + return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollInfiniteWithContext tries a condition func until it returns true or an error +// +// PollInfiniteWithContext always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollImmediate tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// PollImmediate always checks 'condition' before waiting for the interval. 'condition' +// will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { + return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollImmediateWithContext tries a condition func until it returns true, an error, +// or the timeout is reached or the specified context expires, whichever happens first. +// +// PollImmediateWithContext always checks 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollWithContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. 
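[editor's note] Every `Poll*` variant in this file is now deprecated, so a short sketch mapping old call sites onto the two context-based entry points may help reviewers; the interval, timeout, and `isReady` condition are assumptions:

```go
package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// isReady is a stand-in condition; a real caller would query an API object.
func isReady(ctx context.Context) (bool, error) {
	return true, nil
}

func main() {
	ctx := context.Background()

	// Was: wait.Poll(interval, timeout, cond) / wait.PollImmediate(interval, timeout, cond).
	// The immediate flag now selects between the two behaviors.
	_ = wait.PollUntilContextTimeout(ctx, time.Second, 2*time.Minute, false, isReady)
	_ = wait.PollUntilContextTimeout(ctx, time.Second, 2*time.Minute, true, isReady)

	// Was: wait.PollUntil / wait.PollImmediateUntil / wait.PollInfinite, i.e. no
	// fixed timeout; cancellation now comes solely from the caller's context.
	cancelCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	_ = wait.PollUntilContextCancel(cancelCtx, time.Second, true, isReady)
}
```

Note that the replacements return errors from the `context` package rather than `ErrWaitTimeout`, so they pair naturally with the `wait.Interrupted` check shown earlier.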
+func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, timeout), condition) +} + +// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. +// +// PollImmediateUntil runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollImmediateUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollImmediateUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// PollImmediateInfinite tries a condition func until it returns true or an error +// +// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { + return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollImmediateInfiniteWithContext tries a condition func until it returns true +// or an error or the specified context gets cancelled or expired. +// +// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollWithContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// Internally used, each of the public 'Poll*' function defined in this +// package should invoke this internal function with appropriate parameters. +// ctx: the context specified by the caller, for infinite polling pass +// a context that never gets cancelled or expired. 
+// immediate: if true, the 'condition' will be invoked before waiting for the interval, +// in this case 'condition' will always be invoked at least once. +// wait: user specified WaitFunc function that controls at what interval the condition +// function should be invoked periodically and whether it is bound by a timeout. +// condition: user specified ConditionWithContextFunc function. +// +// Deprecated: will be removed in favor of loopConditionUntilContext. +func poll(ctx context.Context, immediate bool, wait waitWithContextFunc, condition ConditionWithContextFunc) error { + if immediate { + done, err := runConditionWithCrashProtectionWithContext(ctx, condition) + if err != nil { + return err + } + if done { + return nil + } + } + + select { + case <-ctx.Done(): + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead + return ErrWaitTimeout + default: + return waitForWithContext(ctx, wait, condition) + } +} + +// poller returns a WaitFunc that will send to the channel every interval until +// timeout has elapsed and then closes the channel. +// +// Over very short intervals you may receive no ticks before the channel is +// closed. A timeout of 0 is interpreted as an infinity, and in such a case +// it would be the caller's responsibility to close the done channel. +// Failure to do so would result in a leaked goroutine. +// +// Output ticks are not buffered. If the channel is not ready to receive an +// item, the tick is skipped. +// +// Deprecated: Will be removed in a future release. +func poller(interval, timeout time.Duration) waitWithContextFunc { + return waitWithContextFunc(func(ctx context.Context) <-chan struct{} { + ch := make(chan struct{}) + + go func() { + defer close(ch) + + tick := time.NewTicker(interval) + defer tick.Stop() + + var after <-chan time.Time + if timeout != 0 { + // time.After is more convenient, but it + // potentially leaves timers around much longer + // than necessary if we exit early. + timer := time.NewTimer(timeout) + after = timer.C + defer timer.Stop() + } + + for { + select { + case <-tick.C: + // If the consumer isn't ready for this signal drop it and + // check the other channels. + select { + case ch <- struct{}{}: + default: + } + case <-after: + return + case <-ctx.Done(): + return + } + } + }() + + return ch + }) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go new file mode 100644 index 0000000000..3efba32132 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go @@ -0,0 +1,121 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "time" + + "k8s.io/utils/clock" +) + +// Timer abstracts how wait functions interact with time runtime efficiently. Test +// code may implement this interface directly but package consumers are encouraged +// to use the Backoff type as the primary mechanism for acquiring a Timer. 
The +// interface is a simplification of clock.Timer to prevent misuse. Timers are not +// expected to be safe for calls from multiple goroutines. +type Timer interface { + // C returns a channel that will receive a struct{} each time the timer fires. + // The channel should not be waited on after Stop() is invoked. It is allowed + // to cache the returned value of C() for the lifetime of the Timer. + C() <-chan time.Time + // Next is invoked by wait functions to signal timers that the next interval + // should begin. You may only use Next() if you have drained the channel C(). + // You should not call Next() after Stop() is invoked. + Next() + // Stop releases the timer. It is safe to invoke if no other methods have been + // called. + Stop() +} + +type noopTimer struct { + closedCh <-chan time.Time +} + +// newNoopTimer creates a timer with a unique channel to avoid contention +// for the channel's lock across multiple unrelated timers. +func newNoopTimer() noopTimer { + ch := make(chan time.Time) + close(ch) + return noopTimer{closedCh: ch} +} + +func (t noopTimer) C() <-chan time.Time { + return t.closedCh +} +func (noopTimer) Next() {} +func (noopTimer) Stop() {} + +type variableTimer struct { + fn DelayFunc + t clock.Timer + new func(time.Duration) clock.Timer +} + +func (t *variableTimer) C() <-chan time.Time { + if t.t == nil { + d := t.fn() + t.t = t.new(d) + } + return t.t.C() +} +func (t *variableTimer) Next() { + if t.t == nil { + return + } + d := t.fn() + t.t.Reset(d) +} +func (t *variableTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +type fixedTimer struct { + interval time.Duration + t clock.Ticker + new func(time.Duration) clock.Ticker +} + +func (t *fixedTimer) C() <-chan time.Time { + if t.t == nil { + t.t = t.new(t.interval) + } + return t.t.C() +} +func (t *fixedTimer) Next() { + // no-op for fixed timers +} +func (t *fixedTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +var ( + // RealTimer can be passed to methods that need a clock.Timer. + RealTimer = clock.RealClock{}.NewTimer +) + +var ( + // internalClock is used for test injection of clocks + internalClock = clock.RealClock{} +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 137627b405..6805e8cf94 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -18,14 +18,11 @@ package wait import ( "context" - "errors" - "math" "math/rand" "sync" "time" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/utils/clock" ) // For any test of the style: @@ -83,113 +80,6 @@ func Forever(f func(), period time.Duration) { Until(f, period, NeverStop) } -// Until loops until stop channel is closed, running f every period. -// -// Until is syntactic sugar on top of JitterUntil with zero jitter factor and -// with sliding = true (which means the timer for period starts after the f -// completes). -func Until(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, true, stopCh) -} - -// UntilWithContext loops until context is done, running f every period. -// -// UntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor and with sliding = true (which means the timer -// for period starts after the f completes). 
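[editor's note] The `Timer` contract above (`C`, `Next`, `Stop`) is normally obtained from `Backoff.Timer()` rather than implemented directly. A minimal sketch of driving one by hand, with illustrative backoff parameters:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Backoff.Timer() picks a fixed or variable timer based on the parameters.
	t := wait.Backoff{Duration: 200 * time.Millisecond, Factor: 2, Steps: 4}.Timer()
	defer t.Stop()

	for i := 0; i < 3; i++ {
		<-t.C() // drain the current interval before calling Next()
		fmt.Println("tick", i, time.Now())
		t.Next() // arm the next (possibly longer) interval
	}
}
```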
-func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, true) -} - -// NonSlidingUntil loops until stop channel is closed, running f every -// period. -// -// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter -// factor, with sliding = false (meaning the timer for period starts at the same -// time as the function starts). -func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, false, stopCh) -} - -// NonSlidingUntilWithContext loops until context is done, running f every -// period. -// -// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor, with sliding = false (meaning the timer for period -// starts at the same time as the function starts). -func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, false) -} - -// JitterUntil loops until stop channel is closed, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Close stopCh to stop. f may not be invoked if stop channel is already -// closed. Pass NeverStop to if you don't want it stop. -func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) -} - -// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { - var t clock.Timer - for { - select { - case <-stopCh: - return - default: - } - - if !sliding { - t = backoff.Backoff() - } - - func() { - defer runtime.HandleCrash() - f() - }() - - if sliding { - t = backoff.Backoff() - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and stopCh, and t.C select falls through. - // In order to mitigate we re-check stopCh at the beginning - // of every loop to prevent extra executions of f(). - select { - case <-stopCh: - if !t.Stop() { - <-t.C() - } - return - case <-t.C(): - } - } -} - -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. 
// @@ -203,9 +93,6 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration { return wait } -// ErrWaitTimeout is returned when the condition exited without success. -var ErrWaitTimeout = errors.New("timed out waiting for the condition") - // ConditionFunc returns true if the condition is satisfied, or an error // if the loop should be aborted. type ConditionFunc func() (done bool, err error) @@ -223,425 +110,80 @@ func (cf ConditionFunc) WithContext() ConditionWithContextFunc { } } -// runConditionWithCrashProtection runs a ConditionFunc with crash protection -func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { - return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext()) -} - -// runConditionWithCrashProtectionWithContext runs a -// ConditionWithContextFunc with crash protection. -func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { - defer runtime.HandleCrash() - return condition(ctx) -} - -// Backoff holds parameters applied to a Backoff function. -type Backoff struct { - // The initial duration. - Duration time.Duration - // Duration is multiplied by factor each iteration, if factor is not zero - // and the limits imposed by Steps and Cap have not been reached. - // Should not be negative. - // The jitter does not contribute to the updates to the duration parameter. - Factor float64 - // The sleep at each iteration is the duration plus an additional - // amount chosen uniformly at random from the interval between - // zero and `jitter*duration`. - Jitter float64 - // The remaining number of iterations in which the duration - // parameter may change (but progress can be stopped earlier by - // hitting the cap). If not positive, the duration is not - // changed. Used for exponential backoff in combination with - // Factor and Cap. - Steps int - // A limit on revised values of the duration parameter. If a - // multiplication by the factor parameter would make the duration - // exceed the cap then the duration is set to the cap and the - // steps parameter is set to zero. - Cap time.Duration -} - -// Step (1) returns an amount of time to sleep determined by the -// original Duration and Jitter and (2) mutates the provided Backoff -// to update its Steps and Duration. -func (b *Backoff) Step() time.Duration { - if b.Steps < 1 { - if b.Jitter > 0 { - return Jitter(b.Duration, b.Jitter) - } - return b.Duration - } - b.Steps-- - - duration := b.Duration - - // calculate the next step - if b.Factor != 0 { - b.Duration = time.Duration(float64(b.Duration) * b.Factor) - if b.Cap > 0 && b.Duration > b.Cap { - b.Duration = b.Cap - b.Steps = 0 - } - } - - if b.Jitter > 0 { - duration = Jitter(duration, b.Jitter) - } - return duration -} - -// ContextForChannel derives a child context from a parent channel. -// -// The derived context's Done channel is closed when the returned cancel function -// is called or when the parent channel is closed, whichever happens first. -// -// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. -func ContextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - select { - case <-parentCh: - cancel() - case <-ctx.Done(): - } - }() - return ctx, cancel -} - -// BackoffManager manages backoff with a particular scheme based on its underlying implementation. 
It provides -// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() -// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in -// undetermined behavior. -// The BackoffManager is supposed to be called in a single-threaded environment. -type BackoffManager interface { - Backoff() clock.Timer -} - -type exponentialBackoffManagerImpl struct { - backoff *Backoff - backoffTimer clock.Timer - lastBackoffStart time.Time - initialBackoff time.Duration - backoffResetDuration time.Duration - clock clock.Clock -} - -// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and -// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. -// This backoff manager is used to reduce load during upstream unhealthiness. -func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { - return &exponentialBackoffManagerImpl{ - backoff: &Backoff{ - Duration: initBackoff, - Factor: backoffFactor, - Jitter: jitter, - - // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not - // what we ideally need here, we set it to max int and assume we will never use up the steps - Steps: math.MaxInt32, - Cap: maxBackoff, - }, - backoffTimer: nil, - initialBackoff: initBackoff, - lastBackoffStart: c.Now(), - backoffResetDuration: resetDuration, - clock: c, - } -} - -func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { - if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { - b.backoff.Steps = math.MaxInt32 - b.backoff.Duration = b.initialBackoff - } - b.lastBackoffStart = b.clock.Now() - return b.backoff.Step() -} - -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. -// The returned timer must be drained before calling Backoff() the second time -func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { - if b.backoffTimer == nil { - b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) - } else { - b.backoffTimer.Reset(b.getNextBackoff()) - } - return b.backoffTimer -} - -type jitteredBackoffManagerImpl struct { - clock clock.Clock - duration time.Duration - jitter float64 - backoffTimer clock.Timer -} - -// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter -// is negative, backoff will not be jittered. -func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { - return &jitteredBackoffManagerImpl{ - clock: c, - duration: duration, - jitter: jitter, - backoffTimer: nil, - } -} - -func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { - jitteredPeriod := j.duration - if j.jitter > 0.0 { - jitteredPeriod = Jitter(j.duration, j.jitter) - } - return jitteredPeriod +// ContextForChannel provides a context that will be treated as cancelled +// when the provided parentCh is closed. The implementation returns +// context.Canceled for Err() if and only if the parentCh is closed. +func ContextForChannel(parentCh <-chan struct{}) context.Context { + return channelContext{stopCh: parentCh} } -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. 
-// The returned timer must be drained before calling Backoff() the second time -func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { - backoff := j.getNextBackoff() - if j.backoffTimer == nil { - j.backoffTimer = j.clock.NewTimer(backoff) - } else { - j.backoffTimer.Reset(backoff) - } - return j.backoffTimer -} +var _ context.Context = channelContext{} -// ExponentialBackoff repeats a condition check with exponential backoff. -// -// It repeatedly checks the condition and then sleeps, using `backoff.Step()` -// to determine the length of the sleep and adjust Duration and Steps. -// Stops and returns as soon as: -// 1. the condition check returns true or an error, -// 2. `backoff.Steps` checks of the condition have been done, or -// 3. a sleep truncated by the cap on duration has been completed. -// In case (1) the returned error is what the condition function returned. -// In all other cases, ErrWaitTimeout is returned. -func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - if backoff.Steps == 1 { - break - } - time.Sleep(backoff.Step()) - } - return ErrWaitTimeout -} - -// Poll tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// Poll always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollWithContext tries a condition func until it returns true, an error, -// or when the context expires or the timeout is reached, whichever -// happens first. -// -// PollWithContext always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, timeout), condition) -} - -// PollUntil tries a condition func until it returns true, an error or stopCh is +// channelContext will behave as if the context were cancelled when stopCh is // closed. -// -// PollUntil always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. -func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := ContextForChannel(stopCh) - defer cancel() - return PollUntilWithContext(ctx, interval, condition.WithContext()) -} - -// PollUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollUntilWithContext always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. 
-func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) -} - -// PollInfinite tries a condition func until it returns true or an error -// -// PollInfinite always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfinite(interval time.Duration, condition ConditionFunc) error { - return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) -} - -// PollInfiniteWithContext tries a condition func until it returns true or an error -// -// PollInfiniteWithContext always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) +type channelContext struct { + stopCh <-chan struct{} } -// PollImmediate tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// PollImmediate always checks 'condition' before waiting for the interval. 'condition' -// will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollImmediateWithContext tries a condition func until it returns true, an error, -// or the timeout is reached or the specified context expires, whichever happens first. -// -// PollImmediateWithContext always checks 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, timeout), condition) -} - -// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. -// -// PollImmediateUntil runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := ContextForChannel(stopCh) - defer cancel() - return PollImmediateUntilWithContext(ctx, interval, condition.WithContext()) -} - -// PollImmediateUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. 
-func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) +func (c channelContext) Done() <-chan struct{} { return c.stopCh } +func (c channelContext) Err() error { + select { + case <-c.stopCh: + return context.Canceled + default: + return nil + } } +func (c channelContext) Deadline() (time.Time, bool) { return time.Time{}, false } +func (c channelContext) Value(key any) any { return nil } -// PollImmediateInfinite tries a condition func until it returns true or an error +// runConditionWithCrashProtection runs a ConditionFunc with crash protection. // -// PollImmediateInfinite runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { - return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +// Deprecated: Will be removed when the legacy polling methods are removed. +func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { + defer runtime.HandleCrash() + return condition() } -// PollImmediateInfiniteWithContext tries a condition func until it returns true -// or an error or the specified context gets cancelled or expired. +// runConditionWithCrashProtectionWithContext runs a ConditionWithContextFunc +// with crash protection. // -// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) -} - -// Internally used, each of the public 'Poll*' function defined in this -// package should invoke this internal function with appropriate parameters. -// ctx: the context specified by the caller, for infinite polling pass -// a context that never gets cancelled or expired. -// immediate: if true, the 'condition' will be invoked before waiting for the interval, -// in this case 'condition' will always be invoked at least once. -// wait: user specified WaitFunc function that controls at what interval the condition -// function should be invoked periodically and whether it is bound by a timeout. -// condition: user specified ConditionWithContextFunc function. -func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error { - if immediate { - done, err := runConditionWithCrashProtectionWithContext(ctx, condition) - if err != nil { - return err - } - if done { - return nil - } - } - - select { - case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility - return ErrWaitTimeout - default: - return WaitForWithContext(ctx, wait, condition) - } +// Deprecated: Will be removed when the legacy polling methods are removed. +func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) } -// WaitFunc creates a channel that receives an item every time a test +// waitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. 
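[editor's note] `ContextForChannel` now returns only a `context.Context` (the old two-value form with a `CancelFunc` is removed above). A hedged sketch of adapting a legacy stop channel to the context-based poll API; the stop-channel wiring and condition are illustrative:

```go
package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// Previously: ctx, cancel := wait.ContextForChannel(stopCh); defer cancel()
	// Now the returned context reports cancellation if and only if stopCh is closed.
	ctx := wait.ContextForChannel(stopCh)

	go func() {
		time.Sleep(3 * time.Second)
		close(stopCh) // simulate an external shutdown signal
	}()

	_ = wait.PollUntilContextCancel(ctx, time.Second, true,
		func(ctx context.Context) (bool, error) {
			// Placeholder condition; keep polling until stopCh is closed.
			return false, nil
		})
}
```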
-type WaitFunc func(done <-chan struct{}) <-chan struct{} +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitFunc func(done <-chan struct{}) <-chan struct{} // WithContext converts the WaitFunc to an equivalent WaitWithContextFunc -func (w WaitFunc) WithContext() WaitWithContextFunc { +func (w waitFunc) WithContext() waitWithContextFunc { return func(ctx context.Context) <-chan struct{} { return w(ctx.Done()) } } -// WaitWithContextFunc creates a channel that receives an item every time a test +// waitWithContextFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. // // When the specified context gets cancelled or expires the function // stops sending item and returns immediately. -type WaitWithContextFunc func(ctx context.Context) <-chan struct{} - -// WaitFor continually checks 'fn' as driven by 'wait'. // -// WaitFor gets a channel from 'wait()”, and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. If the channel is closed -// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. -// -// If 'fn' returns an error the loop ends and that error is returned. If -// 'fn' returns true the loop ends and nil is returned. -// -// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever -// returning true. -// -// When the done channel is closed, because the golang `select` statement is -// "uniform pseudo-random", the `fn` might still run one or multiple time, -// though eventually `WaitFor` will return. -func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - ctx, cancel := ContextForChannel(done) - defer cancel() - return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext()) -} +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitWithContextFunc func(ctx context.Context) <-chan struct{} -// WaitForWithContext continually checks 'fn' as driven by 'wait'. +// waitForWithContext continually checks 'fn' as driven by 'wait'. // -// WaitForWithContext gets a channel from 'wait()”, and then invokes 'fn' +// waitForWithContext gets a channel from 'wait()”, and then invokes 'fn' // once for every value placed on the channel and once more when the // channel is closed. If the channel is closed and 'fn' -// returns false without error, WaitForWithContext returns ErrWaitTimeout. +// returns false without error, waitForWithContext returns ErrWaitTimeout. // // If 'fn' returns an error the loop ends and that error is returned. If // 'fn' returns true the loop ends and nil is returned. @@ -651,8 +193,11 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // // When the ctx.Done() channel is closed, because the golang `select` statement is // "uniform pseudo-random", the `fn` might still run one or multiple times, -// though eventually `WaitForWithContext` will return. -func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error { +// though eventually `waitForWithContext` will return. +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. 
+func waitForWithContext(ctx context.Context, wait waitWithContextFunc, fn ConditionWithContextFunc) error { waitCtx, cancel := context.WithCancel(context.Background()) defer cancel() c := wait(waitCtx) @@ -670,88 +215,9 @@ func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn Condit return ErrWaitTimeout } case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead return ErrWaitTimeout } } } - -// poller returns a WaitFunc that will send to the channel every interval until -// timeout has elapsed and then closes the channel. -// -// Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity, and in such a case -// it would be the caller's responsibility to close the done channel. -// Failure to do so would result in a leaked goroutine. -// -// Output ticks are not buffered. If the channel is not ready to receive an -// item, the tick is skipped. -func poller(interval, timeout time.Duration) WaitWithContextFunc { - return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} { - ch := make(chan struct{}) - - go func() { - defer close(ch) - - tick := time.NewTicker(interval) - defer tick.Stop() - - var after <-chan time.Time - if timeout != 0 { - // time.After is more convenient, but it - // potentially leaves timers around much longer - // than necessary if we exit early. - timer := time.NewTimer(timeout) - after = timer.C - defer timer.Stop() - } - - for { - select { - case <-tick.C: - // If the consumer isn't ready for this signal drop it and - // check the other channels. - select { - case ch <- struct{}{}: - default: - } - case <-after: - return - case <-ctx.Done(): - return - } - } - }() - - return ch - }) -} - -// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never -// exceeds the deadline specified by the request context. -func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - - if backoff.Steps == 1 { - break - } - - waitBeforeRetry := backoff.Step() - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(waitBeforeRetry): - } - } - - return ErrWaitTimeout -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go new file mode 100644 index 0000000000..ea1dc377b9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go index eba37bafdb..faff51a041 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go @@ -37,6 +37,7 @@ type MutatingWebhookApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with @@ -139,3 +140,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go index d0691de107..613856bac7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go @@ -36,6 +36,7 @@ type ValidatingWebhookApplyConfiguration struct { SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with @@ -130,3 +131,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go deleted file mode 100644 index 4936110fbd..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/admissionpolicyspec.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" -) - -// AdmissionPolicySpecApplyConfiguration represents an declarative configuration of the AdmissionPolicySpec type for use -// with apply. 
-type AdmissionPolicySpecApplyConfiguration struct { - ParamSource *ParamSourceApplyConfiguration `json:"paramSource,omitempty"` - MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` - Validations []ValidationApplyConfiguration `json:"validations,omitempty"` - FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` -} - -// AdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the AdmissionPolicySpec type for use with -// apply. -func AdmissionPolicySpec() *AdmissionPolicySpecApplyConfiguration { - return &AdmissionPolicySpecApplyConfiguration{} -} - -// WithParamSource sets the ParamSource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ParamSource field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithParamSource(value *ParamSourceApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - b.ParamSource = value - return b -} - -// WithMatchResources sets the MatchResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the MatchResources field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - b.MatchResources = value - return b -} - -// WithValidations adds the given value to the Validations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Validations field. -func (b *AdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *AdmissionPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithValidations") - } - b.Validations = append(b.Validations, *values[i]) - } - return b -} - -// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FailurePolicy field is set to the value of the last call. -func (b *AdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1alpha1.FailurePolicyType) *AdmissionPolicySpecApplyConfiguration { - b.FailurePolicy = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go new file mode 100644 index 0000000000..023695139d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// with apply. +type AuditAnnotationApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// apply. +func AuditAnnotation() *AuditAnnotationApplyConfiguration { + return &AuditAnnotationApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go new file mode 100644 index 0000000000..f8b511f512 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// with apply. +type ExpressionWarningApplyConfiguration struct { + FieldRef *string `json:"fieldRef,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// apply. 
+func ExpressionWarning() *ExpressionWarningApplyConfiguration { + return &ExpressionWarningApplyConfiguration{} +} + +// WithFieldRef sets the FieldRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FieldRef field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration { + b.FieldRef = &value + return b +} + +// WithWarning sets the Warning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Warning field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration { + b.Warning = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go similarity index 50% rename from vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go rename to vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go index a7a5a6af83..186c750f96 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go @@ -18,31 +18,31 @@ limitations under the License. package v1alpha1 -// ParamSourceApplyConfiguration represents an declarative configuration of the ParamSource type for use +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use // with apply. -type ParamSourceApplyConfiguration struct { - APIVersion *string `json:"apiVersion,omitempty"` - Kind *string `json:"kind,omitempty"` +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` } -// ParamSourceApplyConfiguration constructs an declarative configuration of the ParamSource type for use with +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with // apply. -func ParamSource() *ParamSourceApplyConfiguration { - return &ParamSourceApplyConfiguration{} +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} } -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ParamSourceApplyConfiguration) WithAPIVersion(value string) *ParamSourceApplyConfiguration { - b.APIVersion = &value +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value return b } -// WithKind sets the Kind field in the declarative configuration to the given value +// WithExpression sets the Expression field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ParamSourceApplyConfiguration) WithKind(value string) *ParamSourceApplyConfiguration { - b.Kind = &value +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go deleted file mode 100644 index 313de9d5f5..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. -type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1alpha1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. 
-func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1alpha1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go deleted file mode 100644 index 112f4826b6..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/rulewithoperations.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/admissionregistration/v1" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" -) - -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use -// with apply. -type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` - admissionregistrationv1.RuleApplyConfiguration `json:",inline"` -} - -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with -// apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} -} - -// WithOperations adds the given value to the Operations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Operations field. 
-func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Operations = append(b.Operations, values[i]) - } - return b -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go new file mode 100644 index 0000000000..42a9170710 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// with apply. 
+type TypeCheckingApplyConfiguration struct { + ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` +} + +// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// apply. +func TypeChecking() *TypeCheckingApplyConfiguration { + return &TypeCheckingApplyConfiguration{} +} + +// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field. +func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExpressionWarnings") + } + b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go index 3a23e0c726..c860b85cf7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -32,7 +32,8 @@ import ( type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } // ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with @@ -245,3 +246,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *Validating b.Spec = value return b } + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go index f06f655493..c9a4ff7ab4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go @@ -18,12 +18,17 @@ limitations under the License. package v1alpha1 +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + // ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. 
type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { - PolicyName *string `json:"policyName,omitempty"` - ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` - MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"` } // ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with @@ -55,3 +60,13 @@ func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResour b.MatchResources = value return b } + +// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ValidationActions field. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1alpha1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + for i := range values { + b.ValidationActions = append(b.ValidationActions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go index cba1e720ce..f674b5b1ec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go @@ -29,6 +29,8 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` Validations []ValidationApplyConfiguration `json:"validations,omitempty"` FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` + AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with @@ -73,3 +75,29 @@ func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(valu b.FailurePolicy = &value return b } + +// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AuditAnnotations field. 
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAuditAnnotations") + } + b.AuditAnnotations = append(b.AuditAnnotations, *values[i]) + } + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go new file mode 100644 index 0000000000..821184c8a8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// with apply. +type ValidatingAdmissionPolicyStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// apply. +func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { + return &ValidatingAdmissionPolicyStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TypeChecking field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.TypeChecking = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go index 43916603b1..9a5fc8475a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go @@ -25,9 +25,10 @@ import ( // ValidationApplyConfiguration represents an declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { - Expression *string `json:"expression,omitempty"` - Message *string `json:"message,omitempty"` - Reason *v1.StatusReason `json:"reason,omitempty"` + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *v1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` } // ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with @@ -59,3 +60,11 @@ func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *Valida b.Reason = &value return b } + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go new file mode 100644 index 0000000000..d099b6b6ea --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go index cc48d3b6f0..54845341f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go @@ -38,6 +38,7 @@ type MutatingWebhookApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1beta1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with @@ -140,3 +141,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go deleted file mode 100644 index 21151b9980..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. -type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1beta1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. 
-func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1beta1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go deleted file mode 100644 index 0fd5dd34db..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/admissionregistration/v1" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" -) - -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use -// with apply. -type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` - admissionregistrationv1.RuleApplyConfiguration `json:",inline"` -} - -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with -// apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} -} - -// WithOperations adds the given value to the Operations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Operations field. -func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Operations = append(b.Operations, values[i]) - } - return b -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. 
-func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go index 84479b5db3..8c5c341bad 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go @@ -37,6 +37,7 @@ type ValidatingWebhookApplyConfiguration struct { SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } // ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with @@ -131,3 +132,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. 
+func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go deleted file mode 100644 index 86601cc48a..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v2 - -import ( - v1 "k8s.io/api/core/v1" -) - -// PodResourceMetricSourceApplyConfiguration represents an declarative configuration of the PodResourceMetricSource type for use -// with apply. -type PodResourceMetricSourceApplyConfiguration struct { - Name *v1.ResourceName `json:"name,omitempty"` - Target *MetricTargetApplyConfiguration `json:"target,omitempty"` -} - -// PodResourceMetricSourceApplyConfiguration constructs an declarative configuration of the PodResourceMetricSource type for use with -// apply. -func PodResourceMetricSource() *PodResourceMetricSourceApplyConfiguration { - return &PodResourceMetricSourceApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *PodResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *PodResourceMetricSourceApplyConfiguration { - b.Name = &value - return b -} - -// WithTarget sets the Target field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Target field is set to the value of the last call. -func (b *PodResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodResourceMetricSourceApplyConfiguration { - b.Target = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go similarity index 64% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go rename to vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go index 44890c2d92..788d2a07dc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podscheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1alpha1 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,66 +27,63 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSchedulingApplyConfiguration represents an declarative configuration of the PodScheduling type for use +// ClusterTrustBundleApplyConfiguration represents an declarative configuration of the ClusterTrustBundle type for use // with apply. -type PodSchedulingApplyConfiguration struct { +type ClusterTrustBundleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSchedulingSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodSchedulingStatusApplyConfiguration `json:"status,omitempty"` + Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"` } -// PodScheduling constructs an declarative configuration of the PodScheduling type for use with +// ClusterTrustBundle constructs an declarative configuration of the ClusterTrustBundle type for use with // apply. -func PodScheduling(name, namespace string) *PodSchedulingApplyConfiguration { - b := &PodSchedulingApplyConfiguration{} +func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration { + b := &ClusterTrustBundleApplyConfiguration{} b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("PodScheduling") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") return b } -// ExtractPodScheduling extracts the applied configuration owned by fieldManager from -// podScheduling. If no managedFields are found in podScheduling for fieldManager, a -// PodSchedulingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractClusterTrustBundle extracts the applied configuration owned by fieldManager from +// clusterTrustBundle. If no managedFields are found in clusterTrustBundle for fieldManager, a +// ClusterTrustBundleApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podScheduling must be a unmodified PodScheduling API object that was retrieved from the Kubernetes API. -// ExtractPodScheduling provides a way to perform a extract/modify-in-place/apply workflow. +// clusterTrustBundle must be a unmodified ClusterTrustBundle API object that was retrieved from the Kubernetes API. +// ExtractClusterTrustBundle provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractPodScheduling(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { - return extractPodScheduling(podScheduling, fieldManager, "") +func ExtractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "") } -// ExtractPodSchedulingStatus is the same as ExtractPodScheduling except +// ExtractClusterTrustBundleStatus is the same as ExtractClusterTrustBundle except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSchedulingStatus(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string) (*PodSchedulingApplyConfiguration, error) { - return extractPodScheduling(podScheduling, fieldManager, "status") +func ExtractClusterTrustBundleStatus(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "status") } -func extractPodScheduling(podScheduling *resourcev1alpha1.PodScheduling, fieldManager string, subresource string) (*PodSchedulingApplyConfiguration, error) { - b := &PodSchedulingApplyConfiguration{} - err := managedfields.ExtractInto(podScheduling, internal.Parser().Type("io.k8s.api.resource.v1alpha1.PodScheduling"), fieldManager, b, subresource) +func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string, subresource string) (*ClusterTrustBundleApplyConfiguration, error) { + b := &ClusterTrustBundleApplyConfiguration{} + err := managedfields.ExtractInto(clusterTrustBundle, internal.Parser().Type("io.k8s.api.certificates.v1alpha1.ClusterTrustBundle"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podScheduling.Name) - b.WithNamespace(podScheduling.Namespace) + b.WithName(clusterTrustBundle.Name) - b.WithKind("PodScheduling") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithKind(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTrustBundleApplyConfiguration { b.Kind = &value return b } @@ -94,7 +91,7 @@ func (b *PodSchedulingApplyConfiguration) WithKind(value string) *PodSchedulingA // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
-func (b *PodSchedulingApplyConfiguration) WithAPIVersion(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *ClusterTrustBundleApplyConfiguration { b.APIVersion = &value return b } @@ -102,7 +99,7 @@ func (b *PodSchedulingApplyConfiguration) WithAPIVersion(value string) *PodSched // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithName(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -111,7 +108,7 @@ func (b *PodSchedulingApplyConfiguration) WithName(value string) *PodSchedulingA // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithGenerateName(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -120,7 +117,7 @@ func (b *PodSchedulingApplyConfiguration) WithGenerateName(value string) *PodSch // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithNamespace(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -129,7 +126,7 @@ func (b *PodSchedulingApplyConfiguration) WithNamespace(value string) *PodSchedu // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithUID(value types.UID) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -138,7 +135,7 @@ func (b *PodSchedulingApplyConfiguration) WithUID(value types.UID) *PodSchedulin // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. 
-func (b *PodSchedulingApplyConfiguration) WithResourceVersion(value string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -147,7 +144,7 @@ func (b *PodSchedulingApplyConfiguration) WithResourceVersion(value string) *Pod // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithGeneration(value int64) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -156,7 +153,7 @@ func (b *PodSchedulingApplyConfiguration) WithGeneration(value int64) *PodSchedu // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -165,7 +162,7 @@ func (b *PodSchedulingApplyConfiguration) WithCreationTimestamp(value metav1.Tim // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -174,7 +171,7 @@ func (b *PodSchedulingApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -184,7 +181,7 @@ func (b *PodSchedulingApplyConfiguration) WithDeletionGracePeriodSeconds(value i // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSchedulingApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -199,7 +196,7 @@ func (b *PodSchedulingApplyConfiguration) WithLabels(entries map[string]string) // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSchedulingApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -213,7 +210,7 @@ func (b *PodSchedulingApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSchedulingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -227,7 +224,7 @@ func (b *PodSchedulingApplyConfiguration) WithOwnerReferences(values ...*v1.Owne // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *PodSchedulingApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithFinalizers(values ...string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -235,7 +232,7 @@ func (b *PodSchedulingApplyConfiguration) WithFinalizers(values ...string) *PodS return b } -func (b *PodSchedulingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *ClusterTrustBundleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -244,15 +241,7 @@ func (b *PodSchedulingApplyConfiguration) ensureObjectMetaApplyConfigurationExis // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithSpec(value *PodSchedulingSpecApplyConfiguration) *PodSchedulingApplyConfiguration { +func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundleSpecApplyConfiguration) *ClusterTrustBundleApplyConfiguration { b.Spec = value return b } - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *PodSchedulingApplyConfiguration) WithStatus(value *PodSchedulingStatusApplyConfiguration) *PodSchedulingApplyConfiguration { - b.Status = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go new file mode 100644 index 0000000000..d1aea1d6dc --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterTrustBundleSpecApplyConfiguration represents an declarative configuration of the ClusterTrustBundleSpec type for use +// with apply. +type ClusterTrustBundleSpecApplyConfiguration struct { + SignerName *string `json:"signerName,omitempty"` + TrustBundle *string `json:"trustBundle,omitempty"` +} + +// ClusterTrustBundleSpecApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleSpec type for use with +// apply. 
+func ClusterTrustBundleSpec() *ClusterTrustBundleSpecApplyConfiguration { + return &ClusterTrustBundleSpecApplyConfiguration{} +} + +// WithSignerName sets the SignerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignerName field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithSignerName(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.SignerName = &value + return b +} + +// WithTrustBundle sets the TrustBundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TrustBundle field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithTrustBundle(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.TrustBundle = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go index d3b066d9c4..9ada59ee20 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go @@ -25,28 +25,29 @@ import ( // ContainerApplyConfiguration represents an declarative configuration of the Container type for use // with apply. type ContainerApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + VolumeMounts 
[]VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool `json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } // ContainerApplyConfiguration constructs an declarative configuration of the Container type for use with @@ -146,6 +147,19 @@ func (b *ContainerApplyConfiguration) WithResources(value *ResourceRequirementsA return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *ContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *ContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go new file mode 100644 index 0000000000..bbbcbc9f13 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ContainerResizePolicyApplyConfiguration represents an declarative configuration of the ContainerResizePolicy type for use +// with apply. 
+type ContainerResizePolicyApplyConfiguration struct { + ResourceName *v1.ResourceName `json:"resourceName,omitempty"` + RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` +} + +// ContainerResizePolicyApplyConfiguration constructs an declarative configuration of the ContainerResizePolicy type for use with +// apply. +func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { + return &ContainerResizePolicyApplyConfiguration{} +} + +// WithResourceName sets the ResourceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceName field is set to the value of the last call. +func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.ResourceName) *ContainerResizePolicyApplyConfiguration { + b.ResourceName = &value + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value v1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration { + b.RestartPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go index 18d2925c17..2b98c4658f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go @@ -18,18 +18,24 @@ limitations under the License. package v1 +import ( + corev1 "k8s.io/api/core/v1" +) + // ContainerStatusApplyConfiguration represents an declarative configuration of the ContainerStatus type for use // with apply. 
type ContainerStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - State *ContainerStateApplyConfiguration `json:"state,omitempty"` - LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` - Ready *bool `json:"ready,omitempty"` - RestartCount *int32 `json:"restartCount,omitempty"` - Image *string `json:"image,omitempty"` - ImageID *string `json:"imageID,omitempty"` - ContainerID *string `json:"containerID,omitempty"` - Started *bool `json:"started,omitempty"` + Name *string `json:"name,omitempty"` + State *ContainerStateApplyConfiguration `json:"state,omitempty"` + LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` + Ready *bool `json:"ready,omitempty"` + RestartCount *int32 `json:"restartCount,omitempty"` + Image *string `json:"image,omitempty"` + ImageID *string `json:"imageID,omitempty"` + ContainerID *string `json:"containerID,omitempty"` + Started *bool `json:"started,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` } // ContainerStatusApplyConfiguration constructs an declarative configuration of the ContainerStatus type for use with @@ -109,3 +115,19 @@ func (b *ContainerStatusApplyConfiguration) WithStarted(value bool) *ContainerSt b.Started = &value return b } + +// WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocatedResources field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithAllocatedResources(value corev1.ResourceList) *ContainerStatusApplyConfiguration { + b.AllocatedResources = &value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *ContainerStatusApplyConfiguration { + b.Resources = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go index 6c24cd419d..c51049ba1f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go @@ -126,6 +126,19 @@ func (b *EphemeralContainerApplyConfiguration) WithResources(value *ResourceRequ return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. 
+func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go index 67e658cfab..764b830e04 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go @@ -25,28 +25,29 @@ import ( // EphemeralContainerCommonApplyConfiguration represents an declarative configuration of the EphemeralContainerCommon type for use // with apply. type EphemeralContainerCommonApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + 
StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool `json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } // EphemeralContainerCommonApplyConfiguration constructs an declarative configuration of the EphemeralContainerCommon type for use with @@ -146,6 +147,19 @@ func (b *EphemeralContainerCommonApplyConfiguration) WithResources(value *Resour return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *EphemeralContainerCommonApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerCommonApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go index 7ee5b9955f..e9d8e5b28f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go @@ -39,6 +39,7 @@ type PodStatusApplyConfiguration struct { ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"` QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"` EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"` + Resize *v1.PodResizeStatus `json:"resize,omitempty"` } // PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with @@ -175,3 +176,11 @@ func (b *PodStatusApplyConfiguration) WithEphemeralContainerStatuses(values ...* } return b } + +// WithResize sets the Resize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resize field is set to the value of the last call. 
+func (b *PodStatusApplyConfiguration) WithResize(value v1.PodResizeStatus) *PodStatusApplyConfiguration { + b.Resize = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index db376b941b..493af6fb3c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -35,7 +35,7 @@ type ServiceSpecApplyConfiguration struct { LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` ExternalName *string `json:"externalName,omitempty"` - ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"` PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"` SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"` @@ -43,7 +43,7 @@ type ServiceSpecApplyConfiguration struct { IPFamilyPolicy *corev1.IPFamilyPolicy `json:"ipFamilyPolicy,omitempty"` AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"` LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` - InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"` + InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"` } // ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with @@ -152,7 +152,7 @@ func (b *ServiceSpecApplyConfiguration) WithExternalName(value string) *ServiceS // WithExternalTrafficPolicy sets the ExternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ExternalTrafficPolicy field is set to the value of the last call. -func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.ExternalTrafficPolicy = &value return b } @@ -218,7 +218,7 @@ func (b *ServiceSpecApplyConfiguration) WithLoadBalancerClass(value string) *Ser // WithInternalTrafficPolicy sets the InternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InternalTrafficPolicy field is set to the value of the last call. 
-func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.InternalTrafficPolicy = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go deleted file mode 100644 index 27b49bf153..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use -// with apply. -type AllowedCSIDriverApplyConfiguration struct { - Name *string `json:"name,omitempty"` -} - -// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with -// apply. -func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { - return &AllowedCSIDriverApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { - b.Name = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go deleted file mode 100644 index 30c3724cfe..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use -// with apply. 
-type AllowedFlexVolumeApplyConfiguration struct { - Driver *string `json:"driver,omitempty"` -} - -// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with -// apply. -func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { - return &AllowedFlexVolumeApplyConfiguration{} -} - -// WithDriver sets the Driver field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Driver field is set to the value of the last call. -func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { - b.Driver = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go deleted file mode 100644 index 493815d8d4..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use -// with apply. -type AllowedHostPathApplyConfiguration struct { - PathPrefix *string `json:"pathPrefix,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` -} - -// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with -// apply. -func AllowedHostPath() *AllowedHostPathApplyConfiguration { - return &AllowedHostPathApplyConfiguration{} -} - -// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PathPrefix field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { - b.PathPrefix = &value - return b -} - -// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnly field is set to the value of the last call. 
-func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { - b.ReadOnly = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go deleted file mode 100644 index c7434a6af0..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use -// with apply. -type FSGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with -// apply. -func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { - return &FSGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go deleted file mode 100644 index 7c79688139..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use -// with apply. -type HostPortRangeApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with -// apply. -func HostPortRange() *HostPortRangeApplyConfiguration { - return &HostPortRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go deleted file mode 100644 index af46f76581..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use -// with apply. -type IDRangeApplyConfiguration struct { - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` -} - -// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with -// apply. -func IDRange() *IDRangeApplyConfiguration { - return &IDRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. 
-func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go deleted file mode 100644 index de3949dc92..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use -// with apply. -type PodSecurityPolicySpecApplyConfiguration struct { - Privileged *bool `json:"privileged,omitempty"` - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` - Volumes []v1beta1.FSType `json:"volumes,omitempty"` - HostNetwork *bool `json:"hostNetwork,omitempty"` - HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` - HostPID *bool `json:"hostPID,omitempty"` - HostIPC *bool `json:"hostIPC,omitempty"` - SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` - RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` - RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` - SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` - FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` - AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` - AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` - AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` - AllowedProcMountTypes []v1.ProcMountType 
`json:"allowedProcMountTypes,omitempty"` - RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` -} - -// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with -// apply. -func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { - return &PodSecurityPolicySpecApplyConfiguration{} -} - -// WithPrivileged sets the Privileged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Privileged field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.Privileged = &value - return b -} - -// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) - } - return b -} - -// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) - } - return b -} - -// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) - } - return b -} - -// WithVolumes adds the given value to the Volumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Volumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.Volumes = append(b.Volumes, values[i]) - } - return b -} - -// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostNetwork field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostNetwork = &value - return b -} - -// WithHostPorts adds the given value to the HostPorts field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the HostPorts field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHostPorts") - } - b.HostPorts = append(b.HostPorts, *values[i]) - } - return b -} - -// WithHostPID sets the HostPID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPID field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostPID = &value - return b -} - -// WithHostIPC sets the HostIPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostIPC field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostIPC = &value - return b -} - -// WithSELinux sets the SELinux field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinux field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SELinux = value - return b -} - -// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsUser field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsUser = value - return b -} - -// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsGroup = value - return b -} - -// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SupplementalGroups field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SupplementalGroups = value - return b -} - -// WithFSGroup sets the FSGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FSGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.FSGroup = value - return b -} - -// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.ReadOnlyRootFilesystem = &value - return b -} - -// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.DefaultAllowPrivilegeEscalation = &value - return b -} - -// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.AllowPrivilegeEscalation = &value - return b -} - -// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedHostPaths") - } - b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) - } - return b -} - -// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedFlexVolumes") - } - b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) - } - return b -} - -// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedCSIDrivers") - } - b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) - } - return b -} - -// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) - } - return b -} - -// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) - } - return b -} - -// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) - } - return b -} - -// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RuntimeClass field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RuntimeClass = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go deleted file mode 100644 index 75e76e85fd..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use -// with apply. -type RunAsGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with -// apply. -func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { - return &RunAsGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go deleted file mode 100644 index 712c1675ac..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use -// with apply. -type RunAsUserStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with -// apply. -func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { - return &RunAsUserStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go deleted file mode 100644 index c19a7ce617..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use -// with apply. 
-type RuntimeClassStrategyOptionsApplyConfiguration struct { - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` -} - -// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with -// apply. -func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { - return &RuntimeClassStrategyOptionsApplyConfiguration{} -} - -// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { - for i := range values { - b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) - } - return b -} - -// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { - b.DefaultRuntimeClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go deleted file mode 100644 index 265906a73a..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use -// with apply. -type SELinuxStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` - SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with -// apply. 
-func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { - return &SELinuxStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinuxOptions field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { - b.SELinuxOptions = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go deleted file mode 100644 index ec43138124..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use -// with apply. -type SupplementalGroupsStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with -// apply. -func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { - return &SupplementalGroupsStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. 
-func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 94dd2160d5..361b2f4e85 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -39,6 +39,17 @@ func Parser() *typed.Parser { var parserOnce sync.Once var parser *typed.Parser var schemaYAML = typed.YAMLObject(`types: +- name: io.k8s.api.admissionregistration.v1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1.MutatingWebhook map: fields: @@ -55,6 +66,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -167,6 +186,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -225,6 +252,39 @@ var schemaYAML = typed.YAMLObject(`types: - name: url type: scalar: string +- name: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: valueExpression + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning + map: + fields: + - name: fieldRef + type: + scalar: string + default: "" + - name: warning + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1alpha1.MatchResources map: fields: @@ -307,6 +367,15 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.TypeChecking + map: + fields: + - name: expressionWarnings + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning + elementRelationship: atomic - name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy map: fields: @@ -324,6 +393,10 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec 
default: {} + - name: status + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus + default: {} - name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding map: fields: @@ -353,12 +426,32 @@ var schemaYAML = typed.YAMLObject(`types: - name: policyName type: scalar: string + - name: validationActions + type: + list: + elementType: + scalar: string + elementRelationship: associative - name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec map: fields: + - name: auditAnnotations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation + elementRelationship: atomic - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchConstraints type: namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources @@ -371,6 +464,23 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.admissionregistration.v1alpha1.Validation elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: observedGeneration + type: + scalar: numeric + - name: typeChecking + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.TypeChecking - name: io.k8s.api.admissionregistration.v1alpha1.Validation map: fields: @@ -381,9 +491,23 @@ var schemaYAML = typed.YAMLObject(`types: - name: message type: scalar: string + - name: messageExpression + type: + scalar: string - name: reason type: scalar: string +- name: io.k8s.api.admissionregistration.v1beta1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook map: fields: @@ -400,6 +524,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -482,6 +614,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -3502,6 +3642,33 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundle + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec + default: {} +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec + map: + fields: + - name: signerName + type: + scalar: string + - name: trustBundle + type: + scalar: string + default: "" - name: io.k8s.api.certificates.v1beta1.CertificateSigningRequest map: fields: @@ 
-4129,6 +4296,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements @@ -4205,6 +4378,17 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: TCP +- name: io.k8s.api.core.v1.ContainerResizePolicy + map: + fields: + - name: resourceName + type: + scalar: string + default: "" + - name: restartPolicy + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ContainerState map: fields: @@ -4263,6 +4447,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.ContainerStatus map: fields: + - name: allocatedResources + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: containerID type: scalar: string @@ -4286,6 +4475,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: boolean default: false + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements - name: restartCount type: scalar: numeric @@ -4521,6 +4713,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements @@ -6185,6 +6383,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: reason type: scalar: string + - name: resize + type: + scalar: string - name: startTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time @@ -7734,29 +7935,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime default: {} -- name: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - map: - fields: - - name: name - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - map: - fields: - - name: driver - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedHostPath - map: - fields: - - name: pathPrefix - type: - scalar: string - - name: readOnly - type: - scalar: boolean - name: io.k8s.api.extensions.v1beta1.DaemonSet map: fields: @@ -7992,18 +8170,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.extensions.v1beta1.HTTPIngressPath map: fields: @@ -8026,28 +8192,6 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.extensions.v1beta1.HTTPIngressPath elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.HostPortRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 -- name: io.k8s.api.extensions.v1beta1.IDRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 - name: io.k8s.api.extensions.v1beta1.IPBlock map: fields: @@ -8293,135 +8437,6 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicy - map: - fields: - - name: 
apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec - default: {} -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec - map: - fields: - - name: allowPrivilegeEscalation - type: - scalar: boolean - - name: allowedCSIDrivers - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - elementRelationship: atomic - - name: allowedCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedFlexVolumes - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - elementRelationship: atomic - - name: allowedHostPaths - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedHostPath - elementRelationship: atomic - - name: allowedProcMountTypes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedUnsafeSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAddCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation - type: - scalar: boolean - - name: forbiddenSysctls - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: fsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC - type: - scalar: boolean - - name: hostNetwork - type: - scalar: boolean - - name: hostPID - type: - scalar: boolean - - name: hostPorts - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.HostPortRange - elementRelationship: atomic - - name: privileged - type: - scalar: boolean - - name: readOnlyRootFilesystem - type: - scalar: boolean - - name: requiredDropCapabilities - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass - type: - namedType: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - - name: seLinux - type: - namedType: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - default: {} - - name: supplementalGroups - type: - namedType: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - default: {} - - name: volumes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - name: io.k8s.api.extensions.v1beta1.ReplicaSet map: fields: @@ -8531,66 +8546,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxUnavailable type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - map: - fields: - - name: 
allowedRuntimeClassNames - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultRuntimeClassName - type: - scalar: string -- name: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - map: - fields: - - name: rule - type: - scalar: string - default: "" - - name: seLinuxOptions - type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string - name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod map: fields: @@ -10270,6 +10225,47 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 +- name: io.k8s.api.networking.v1alpha1.IPAddress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec + default: {} +- name: io.k8s.api.networking.v1alpha1.IPAddressSpec + map: + fields: + - name: parentRef + type: + namedType: io.k8s.api.networking.v1alpha1.ParentReference +- name: io.k8s.api.networking.v1alpha1.ParentReference + map: + fields: + - name: group + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string + - name: uid + type: + scalar: string - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: @@ -11509,19 +11505,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string -- name: io.k8s.api.resource.v1alpha1.AllocationResult +- name: io.k8s.api.resource.v1alpha2.AllocationResult map: fields: - name: availableOnNodes type: namedType: io.k8s.api.core.v1.NodeSelector - - name: resourceHandle + - name: resourceHandles type: - scalar: string + list: + elementType: + namedType: io.k8s.api.resource.v1alpha2.ResourceHandle + elementRelationship: atomic - name: shareable type: scalar: boolean -- name: io.k8s.api.resource.v1alpha1.PodScheduling +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext map: fields: - name: apiVersion @@ -11536,13 +11535,13 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.PodSchedulingSpec + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha1.PodSchedulingStatus + namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus default: {} -- name: io.k8s.api.resource.v1alpha1.PodSchedulingSpec +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec map: fields: - name: potentialNodes @@ -11554,18 +11553,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: selectedNode type: scalar: string -- name: io.k8s.api.resource.v1alpha1.PodSchedulingStatus +- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus map: fields: - name: resourceClaims type: list: elementType: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus elementRelationship: associative keys: - name -- name: io.k8s.api.resource.v1alpha1.ResourceClaim +- name: io.k8s.api.resource.v1alpha2.ResourceClaim map: fields: - name: apiVersion @@ -11580,13 +11579,13 @@ var 
schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec default: {} - name: status type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimStatus + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimStatus default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference +- name: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference map: fields: - name: apiGroup @@ -11604,7 +11603,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference +- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference map: fields: - name: apiGroup @@ -11618,7 +11617,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimSchedulingStatus +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus map: fields: - name: name @@ -11630,7 +11629,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.resource.v1alpha1.ResourceClaimSpec +- name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec map: fields: - name: allocationMode @@ -11638,17 +11637,17 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string - name: parametersRef type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimParametersReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - name: resourceClassName type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha1.ResourceClaimStatus +- name: io.k8s.api.resource.v1alpha2.ResourceClaimStatus map: fields: - name: allocation type: - namedType: io.k8s.api.resource.v1alpha1.AllocationResult + namedType: io.k8s.api.resource.v1alpha2.AllocationResult - name: deallocationRequested type: scalar: boolean @@ -11659,11 +11658,11 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference elementRelationship: associative keys: - uid -- name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplate +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplate map: fields: - name: apiVersion @@ -11678,9 +11677,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplateSpec +- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec map: fields: - name: metadata @@ -11689,9 +11688,9 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClaimSpec + namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec default: {} -- name: io.k8s.api.resource.v1alpha1.ResourceClass +- name: io.k8s.api.resource.v1alpha2.ResourceClass map: fields: - name: apiVersion @@ -11710,11 +11709,11 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: parametersRef type: - namedType: io.k8s.api.resource.v1alpha1.ResourceClassParametersReference + namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference - name: suitableNodes type: namedType: io.k8s.api.core.v1.NodeSelector -- name: 
io.k8s.api.resource.v1alpha1.ResourceClassParametersReference +- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference map: fields: - name: apiGroup @@ -11731,6 +11730,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string +- name: io.k8s.api.resource.v1alpha2.ResourceHandle + map: + fields: + - name: data + type: + scalar: string + - name: driverName + type: + scalar: string - name: io.k8s.api.scheduling.v1.PriorityClass map: fields: diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go deleted file mode 100644 index f400e51649..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupversionkind.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// GroupVersionKindApplyConfiguration represents an declarative configuration of the GroupVersionKind type for use -// with apply. -type GroupVersionKindApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Version *string `json:"version,omitempty"` - Kind *string `json:"kind,omitempty"` -} - -// GroupVersionKindApplyConfiguration constructs an declarative configuration of the GroupVersionKind type for use with -// apply. -func GroupVersionKind() *GroupVersionKindApplyConfiguration { - return &GroupVersionKindApplyConfiguration{} -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithGroup(value string) *GroupVersionKindApplyConfiguration { - b.Group = &value - return b -} - -// WithVersion sets the Version field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Version field is set to the value of the last call. -func (b *GroupVersionKindApplyConfiguration) WithVersion(value string) *GroupVersionKindApplyConfiguration { - b.Version = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. 
-func (b *GroupVersionKindApplyConfiguration) WithKind(value string) *GroupVersionKindApplyConfiguration { - b.Kind = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go deleted file mode 100644 index 5cadee3353..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// ListMetaApplyConfiguration represents an declarative configuration of the ListMeta type for use -// with apply. -type ListMetaApplyConfiguration struct { - SelfLink *string `json:"selfLink,omitempty"` - ResourceVersion *string `json:"resourceVersion,omitempty"` - Continue *string `json:"continue,omitempty"` - RemainingItemCount *int64 `json:"remainingItemCount,omitempty"` -} - -// ListMetaApplyConfiguration constructs an declarative configuration of the ListMeta type for use with -// apply. -func ListMeta() *ListMetaApplyConfiguration { - return &ListMetaApplyConfiguration{} -} - -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithSelfLink(value string) *ListMetaApplyConfiguration { - b.SelfLink = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithResourceVersion(value string) *ListMetaApplyConfiguration { - b.ResourceVersion = &value - return b -} - -// WithContinue sets the Continue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Continue field is set to the value of the last call. -func (b *ListMetaApplyConfiguration) WithContinue(value string) *ListMetaApplyConfiguration { - b.Continue = &value - return b -} - -// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RemainingItemCount field is set to the value of the last call. 
-func (b *ListMetaApplyConfiguration) WithRemainingItemCount(value int64) *ListMetaApplyConfiguration { - b.RemainingItemCount = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go deleted file mode 100644 index 7db432089e..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/status.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StatusApplyConfiguration represents an declarative configuration of the Status type for use -// with apply. -type StatusApplyConfiguration struct { - TypeMetaApplyConfiguration `json:",inline"` - *ListMetaApplyConfiguration `json:"metadata,omitempty"` - Status *string `json:"status,omitempty"` - Message *string `json:"message,omitempty"` - Reason *metav1.StatusReason `json:"reason,omitempty"` - Details *StatusDetailsApplyConfiguration `json:"details,omitempty"` - Code *int32 `json:"code,omitempty"` -} - -// StatusApplyConfiguration constructs an declarative configuration of the Status type for use with -// apply. -func Status() *StatusApplyConfiguration { - b := &StatusApplyConfiguration{} - b.WithKind("Status") - b.WithAPIVersion("meta.k8s.io/v1") - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithKind(value string) *StatusApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithAPIVersion(value string) *StatusApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithSelfLink(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. 
-func (b *StatusApplyConfiguration) WithResourceVersion(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithContinue sets the Continue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Continue field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithContinue(value string) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.Continue = &value - return b -} - -// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RemainingItemCount field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithRemainingItemCount(value int64) *StatusApplyConfiguration { - b.ensureListMetaApplyConfigurationExists() - b.RemainingItemCount = &value - return b -} - -func (b *StatusApplyConfiguration) ensureListMetaApplyConfigurationExists() { - if b.ListMetaApplyConfiguration == nil { - b.ListMetaApplyConfiguration = &ListMetaApplyConfiguration{} - } -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithStatus(value string) *StatusApplyConfiguration { - b.Status = &value - return b -} - -// WithMessage sets the Message field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Message field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithMessage(value string) *StatusApplyConfiguration { - b.Message = &value - return b -} - -// WithReason sets the Reason field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Reason field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithReason(value metav1.StatusReason) *StatusApplyConfiguration { - b.Reason = &value - return b -} - -// WithDetails sets the Details field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Details field is set to the value of the last call. -func (b *StatusApplyConfiguration) WithDetails(value *StatusDetailsApplyConfiguration) *StatusApplyConfiguration { - b.Details = value - return b -} - -// WithCode sets the Code field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Code field is set to the value of the last call. 
-func (b *StatusApplyConfiguration) WithCode(value int32) *StatusApplyConfiguration { - b.Code = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go deleted file mode 100644 index 7f05bca498..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statuscause.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// StatusCauseApplyConfiguration represents an declarative configuration of the StatusCause type for use -// with apply. -type StatusCauseApplyConfiguration struct { - Type *v1.CauseType `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - Field *string `json:"field,omitempty"` -} - -// StatusCauseApplyConfiguration constructs an declarative configuration of the StatusCause type for use with -// apply. -func StatusCause() *StatusCauseApplyConfiguration { - return &StatusCauseApplyConfiguration{} -} - -// WithType sets the Type field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Type field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithType(value v1.CauseType) *StatusCauseApplyConfiguration { - b.Type = &value - return b -} - -// WithMessage sets the Message field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Message field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithMessage(value string) *StatusCauseApplyConfiguration { - b.Message = &value - return b -} - -// WithField sets the Field field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Field field is set to the value of the last call. -func (b *StatusCauseApplyConfiguration) WithField(value string) *StatusCauseApplyConfiguration { - b.Field = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go deleted file mode 100644 index a7dbaa1b26..0000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/statusdetails.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - types "k8s.io/apimachinery/pkg/types" -) - -// StatusDetailsApplyConfiguration represents an declarative configuration of the StatusDetails type for use -// with apply. -type StatusDetailsApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Group *string `json:"group,omitempty"` - Kind *string `json:"kind,omitempty"` - UID *types.UID `json:"uid,omitempty"` - Causes []StatusCauseApplyConfiguration `json:"causes,omitempty"` - RetryAfterSeconds *int32 `json:"retryAfterSeconds,omitempty"` -} - -// StatusDetailsApplyConfiguration constructs an declarative configuration of the StatusDetails type for use with -// apply. -func StatusDetails() *StatusDetailsApplyConfiguration { - return &StatusDetailsApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithName(value string) *StatusDetailsApplyConfiguration { - b.Name = &value - return b -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithGroup(value string) *StatusDetailsApplyConfiguration { - b.Group = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithKind(value string) *StatusDetailsApplyConfiguration { - b.Kind = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithUID(value types.UID) *StatusDetailsApplyConfiguration { - b.UID = &value - return b -} - -// WithCauses adds the given value to the Causes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Causes field. 
-func (b *StatusDetailsApplyConfiguration) WithCauses(values ...*StatusCauseApplyConfiguration) *StatusDetailsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithCauses") - } - b.Causes = append(b.Causes, *values[i]) - } - return b -} - -// WithRetryAfterSeconds sets the RetryAfterSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RetryAfterSeconds field is set to the value of the last call. -func (b *StatusDetailsApplyConfiguration) WithRetryAfterSeconds(value int32) *StatusDetailsApplyConfiguration { - b.RetryAfterSeconds = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go similarity index 66% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go index c70906cfaf..da6822111d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1alpha1 import ( - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,63 +27,63 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use +// IPAddressApplyConfiguration represents an declarative configuration of the IPAddress type for use // with apply. -type PodSecurityPolicyApplyConfiguration struct { +type IPAddressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` + Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` } -// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with +// IPAddress constructs an declarative configuration of the IPAddress type for use with // apply. -func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { - b := &PodSecurityPolicyApplyConfiguration{} +func IPAddress(name string) *IPAddressApplyConfiguration { + b := &IPAddressApplyConfiguration{} b.WithName(name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") return b } -// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from -// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a -// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractIPAddress extracts the applied configuration owned by fieldManager from +// iPAddress. 
If no managedFields are found in iPAddress for fieldManager, a +// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. -// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API. +// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +func ExtractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "") } -// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// ExtractIPAddressStatus is the same as ExtractIPAddress except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSecurityPolicyStatus(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +func ExtractIPAddressStatus(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "status") } -func extractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { - b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) +func extractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) { + b := &IPAddressApplyConfiguration{} + err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1alpha1.IPAddress"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podSecurityPolicy.Name) + b.WithName(iPAddress.Name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
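Aside (not part of the patch): the generated comments above describe the extract/modify-in-place/apply workflow for the new `IPAddress` apply configuration. The following is a minimal sketch of how these builders are typically chained with server-side apply; it assumes a typed `kubernetes.Interface` clientset whose generated networking/v1alpha1 client exposes the usual `Apply` method, and the field-manager name, labels, and parent reference values are placeholders.

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

func applyIPAddress(ctx context.Context, cs kubernetes.Interface) error {
	// Build a declarative configuration by chaining "With" calls; only the
	// fields that are explicitly set end up owned by this field manager.
	ip := networkingv1alpha1.IPAddress("192.168.1.5").
		WithLabels(map[string]string{"example.com/pool": "default"}).
		WithSpec(networkingv1alpha1.IPAddressSpec().
			WithParentRef(networkingv1alpha1.ParentReference().
				WithGroup("").            // core API group
				WithResource("services"). // resource that owns this address
				WithNamespace("default").
				WithName("my-service")))

	// Server-side apply of the configuration (IPAddress is cluster-scoped).
	_, err := cs.NetworkingV1alpha1().IPAddresses().Apply(ctx, ip,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
```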
-func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { b.Kind = &value return b } @@ -91,7 +91,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurit // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +99,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodS // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +108,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurit // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,7 +117,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *Po // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -126,7 +126,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSe // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -135,7 +135,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecur // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -144,7 +144,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -153,7 +153,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSe // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -162,7 +162,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1 // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -171,7 +171,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1 // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -181,7 +181,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -196,7 +196,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]stri // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -210,7 +210,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -224,7 +224,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1. // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -232,7 +232,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) * return b } -func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -241,7 +241,7 @@ func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfiguration // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration { b.Spec = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go new file mode 100644 index 0000000000..064963d691 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// IPAddressSpecApplyConfiguration represents an declarative configuration of the IPAddressSpec type for use +// with apply. +type IPAddressSpecApplyConfiguration struct { + ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` +} + +// IPAddressSpecApplyConfiguration constructs an declarative configuration of the IPAddressSpec type for use with +// apply. +func IPAddressSpec() *IPAddressSpecApplyConfiguration { + return &IPAddressSpecApplyConfiguration{} +} + +// WithParentRef sets the ParentRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParentRef field is set to the value of the last call. 
+func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration { + b.ParentRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go new file mode 100644 index 0000000000..14b10b19ff --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -0,0 +1,79 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use +// with apply. +type ParentReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` +} + +// ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with +// apply. +func ParentReference() *ParentReferenceApplyConfiguration { + return &ParentReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithUID(value types.UID) *ParentReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go similarity index 76% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go index a2ad3adf1a..bc6078aa94 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/allocationresult.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" @@ -25,7 +25,7 @@ import ( // AllocationResultApplyConfiguration represents an declarative configuration of the AllocationResult type for use // with apply. type AllocationResultApplyConfiguration struct { - ResourceHandle *string `json:"resourceHandle,omitempty"` + ResourceHandles []ResourceHandleApplyConfiguration `json:"resourceHandles,omitempty"` AvailableOnNodes *v1.NodeSelectorApplyConfiguration `json:"availableOnNodes,omitempty"` Shareable *bool `json:"shareable,omitempty"` } @@ -36,11 +36,16 @@ func AllocationResult() *AllocationResultApplyConfiguration { return &AllocationResultApplyConfiguration{} } -// WithResourceHandle sets the ResourceHandle field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceHandle field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithResourceHandle(value string) *AllocationResultApplyConfiguration { - b.ResourceHandle = &value +// WithResourceHandles adds the given value to the ResourceHandles field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceHandles field. +func (b *AllocationResultApplyConfiguration) WithResourceHandles(values ...*ResourceHandleApplyConfiguration) *AllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceHandles") + } + b.ResourceHandles = append(b.ResourceHandles, *values[i]) + } return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 0000000000..1dfb6ff97b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,258 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PodSchedulingContextApplyConfiguration represents an declarative configuration of the PodSchedulingContext type for use +// with apply. +type PodSchedulingContextApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"` + Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` +} + +// PodSchedulingContext constructs an declarative configuration of the PodSchedulingContext type for use with +// apply. +func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { + b := &PodSchedulingContextApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b +} + +// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from +// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a +// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API. +// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") +} + +// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") +} + +func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { + b := &PodSchedulingContextApplyConfiguration{} + err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodSchedulingContext"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(podSchedulingContext.Name) + b.WithNamespace(podSchedulingContext.Namespace) + + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go similarity index 67% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go index 9fd3c1ee53..c95d3295e8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go @@ -16,25 +16,25 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 -// PodSchedulingSpecApplyConfiguration represents an declarative configuration of the PodSchedulingSpec type for use +// PodSchedulingContextSpecApplyConfiguration represents an declarative configuration of the PodSchedulingContextSpec type for use // with apply. -type PodSchedulingSpecApplyConfiguration struct { +type PodSchedulingContextSpecApplyConfiguration struct { SelectedNode *string `json:"selectedNode,omitempty"` PotentialNodes []string `json:"potentialNodes,omitempty"` } -// PodSchedulingSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingSpec type for use with +// PodSchedulingContextSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingContextSpec type for use with // apply. -func PodSchedulingSpec() *PodSchedulingSpecApplyConfiguration { - return &PodSchedulingSpecApplyConfiguration{} +func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { + return &PodSchedulingContextSpecApplyConfiguration{} } // WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SelectedNode field is set to the value of the last call. -func (b *PodSchedulingSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingSpecApplyConfiguration { +func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration { b.SelectedNode = &value return b } @@ -42,7 +42,7 @@ func (b *PodSchedulingSpecApplyConfiguration) WithSelectedNode(value string) *Po // WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the PotentialNodes field. 
-func (b *PodSchedulingSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingSpecApplyConfiguration { +func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration { for i := range values { b.PotentialNodes = append(b.PotentialNodes, values[i]) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go similarity index 64% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go index 5744f6c3eb..a8b10b9a0e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/podschedulingstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 -// PodSchedulingStatusApplyConfiguration represents an declarative configuration of the PodSchedulingStatus type for use +// PodSchedulingContextStatusApplyConfiguration represents an declarative configuration of the PodSchedulingContextStatus type for use // with apply. -type PodSchedulingStatusApplyConfiguration struct { +type PodSchedulingContextStatusApplyConfiguration struct { ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` } -// PodSchedulingStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingStatus type for use with +// PodSchedulingContextStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingContextStatus type for use with // apply. -func PodSchedulingStatus() *PodSchedulingStatusApplyConfiguration { - return &PodSchedulingStatusApplyConfiguration{} +func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { + return &PodSchedulingContextStatusApplyConfiguration{} } // WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the ResourceClaims field. -func (b *PodSchedulingStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingStatusApplyConfiguration { +func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithResourceClaims") diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go similarity index 96% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go index f94811a9b1..6c219f837b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go @@ -16,10 +16,10 @@ limitations under the License. 
// Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -43,7 +43,7 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -58,20 +58,20 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "") } // ExtractResourceClaimStatus is the same as ExtractResourceClaim except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { return extractResourceClaim(resourceClaim, fieldManager, "status") } -func extractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { +func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { b := &ResourceClaimApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClaim"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaim"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func extractResourceClaim(resourceClaim *resourcev1alpha1.ResourceClaim, fieldMa b.WithNamespace(resourceClaim.Namespace) b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go index 477099cd7a..41bb9e9a14 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimconsumerreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. 
DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( types "k8s.io/apimachinery/pkg/types" diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go index d7b25d75eb..27820ede60 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimParametersReference type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go index 35ff34abab..e74679aed3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimschedulingstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimSchedulingStatusApplyConfiguration represents an declarative configuration of the ResourceClaimSchedulingStatus type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go index d326190462..0c73e64e9e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" ) // ResourceClaimSpecApplyConfiguration represents an declarative configuration of the ResourceClaimSpec type for use @@ -27,7 +27,7 @@ import ( type ResourceClaimSpecApplyConfiguration struct { ResourceClassName *string `json:"resourceClassName,omitempty"` ParametersRef *ResourceClaimParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - AllocationMode *resourcev1alpha1.AllocationMode `json:"allocationMode,omitempty"` + AllocationMode *resourcev1alpha2.AllocationMode `json:"allocationMode,omitempty"` } // ResourceClaimSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimSpec type for use with @@ -55,7 +55,7 @@ func (b *ResourceClaimSpecApplyConfiguration) WithParametersRef(value *ResourceC // WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AllocationMode field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha1.AllocationMode) *ResourceClaimSpecApplyConfiguration { +func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha2.AllocationMode) *ResourceClaimSpecApplyConfiguration { b.AllocationMode = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go index e2283f8b07..c6fa610906 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClaimStatusApplyConfiguration represents an declarative configuration of the ResourceClaimStatus type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go similarity index 96% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go index e3c602cb65..fc2209b8f0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
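// Illustrative sketch, not part of the vendored change: how calling code builds and
// server-side-applies a ResourceClaim via the apply configurations after the
// resource.k8s.io v1alpha1 -> v1alpha2 rename above. The claim/class names and the
// field manager are placeholders, and the WithSpec/WithResourceClassName builders are
// assumed to follow the usual applyconfiguration-gen naming.
package example

import (
	"context"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
	"k8s.io/client-go/kubernetes"
)

func applyClaim(ctx context.Context, cs kubernetes.Interface) error {
	// The allocation mode constant now comes from the v1alpha2 API group.
	claim := resourceapply.ResourceClaim("example-claim", "default").
		WithSpec(resourceapply.ResourceClaimSpec().
			WithResourceClassName("example-class").
			WithAllocationMode(resourcev1alpha2.AllocationModeWaitForFirstConsumer))

	_, err := cs.ResourceV1alpha2().ResourceClaims("default").
		Apply(ctx, claim, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}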
-package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo b.WithName(name) b.WithNamespace(namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -57,20 +57,20 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") } // ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { +func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") } -func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { b := &ResourceClaimTemplateApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClaimTemplate"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimTemplate"), fieldManager, b, subresource) if err != nil { return nil, err } @@ -78,7 +78,7 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha1.Resour b.WithNamespace(resourceClaimTemplate.Namespace) b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go index 88058e066e..2f38ea0366 100644 --- 
a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclaimtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go similarity index 96% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go index 5f980acdb1..724c9e88e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -44,7 +44,7 @@ func ResourceClass(name string) *ResourceClassApplyConfiguration { b := &ResourceClassApplyConfiguration{} b.WithName(name) b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b } @@ -59,27 +59,27 @@ func ResourceClass(name string) *ResourceClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractResourceClass(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { +func ExtractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { return extractResourceClass(resourceClass, fieldManager, "") } // ExtractResourceClassStatus is the same as ExtractResourceClass except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractResourceClassStatus(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { +func ExtractResourceClassStatus(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { return extractResourceClass(resourceClass, fieldManager, "status") } -func extractResourceClass(resourceClass *resourcev1alpha1.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { +func extractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { b := &ResourceClassApplyConfiguration{} - err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha1.ResourceClass"), fieldManager, b, subresource) + err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClass"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(resourceClass.Name) b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha1") + b.WithAPIVersion("resource.k8s.io/v1alpha2") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go rename to vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go index b03a9a6da4..d67e4d3977 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha1/resourceclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 // ResourceClassParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClassParametersReference type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go new file mode 100644 index 0000000000..028cbaa1a7 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +// ResourceHandleApplyConfiguration represents an declarative configuration of the ResourceHandle type for use +// with apply. 
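// Illustrative sketch, not part of the vendored change: the renamed Extract helpers
// recover only the fields that a given field manager owns on a live object, which is
// the usual precursor to re-applying. "example-manager" is a placeholder.
package example

import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

func ownedResourceClassFields(live *resourcev1alpha2.ResourceClass) (*resourceapply.ResourceClassApplyConfiguration, error) {
	// Fields applied by other field managers are dropped from the result, as the
	// generated doc comment above notes.
	return resourceapply.ExtractResourceClass(live, "example-manager")
}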
+type ResourceHandleApplyConfiguration struct { + DriverName *string `json:"driverName,omitempty"` + Data *string `json:"data,omitempty"` +} + +// ResourceHandleApplyConfiguration constructs an declarative configuration of the ResourceHandle type for use with +// apply. +func ResourceHandle() *ResourceHandleApplyConfiguration { + return &ResourceHandleApplyConfiguration{} +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *ResourceHandleApplyConfiguration) WithDriverName(value string) *ResourceHandleApplyConfiguration { + b.DriverName = &value + return b +} + +// WithData sets the Data field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Data field is set to the value of the last call. +func (b *ResourceHandleApplyConfiguration) WithData(value string) *ResourceHandleApplyConfiguration { + b.Data = &value + return b +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 1253fa1f44..5490b48255 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -67,6 +67,9 @@ const ( acceptDiscoveryFormats = AcceptV2Beta1 + "," + AcceptV1 ) +// Aggregated discovery content-type GVK. +var v2Beta1GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2beta1", Kind: "APIGroupDiscoveryList"} + // DiscoveryInterface holds the methods that discover server-supported API groups, // versions and resources. type DiscoveryInterface interface { @@ -260,16 +263,15 @@ func (d *DiscoveryClient) downloadLegacy() ( } var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList - // Switch on content-type server responded with: aggregated or unaggregated. - switch { - case isV2Beta1ContentType(responseContentType): + // Based on the content-type server responded with: aggregated or unaggregated. + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) - default: + } else { // Default is unaggregated discovery v1. var v metav1.APIVersions err = json.Unmarshal(body, &v) @@ -313,16 +315,15 @@ func (d *DiscoveryClient) downloadAPIs() ( apiGroupList := &metav1.APIGroupList{} failedGVs := map[schema.GroupVersion]error{} var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList - // Switch on content-type server responded with: aggregated or unaggregated. - switch { - case isV2Beta1ContentType(responseContentType): + // Based on the content-type server responded with: aggregated or unaggregated. + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { return nil, nil, nil, err } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) - default: + } else { // Default is unaggregated discovery v1. 
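// Illustrative sketch, not part of the vendored change: the discovery client's private
// isV2Beta1ContentType check is replaced by the exported ContentTypeIsGVK helper in
// this hunk, which can be driven with any GroupVersionKind. The wrapper function name
// is a placeholder; the GVK values match the aggregated-discovery type used here.
package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
)

func isAggregatedDiscoveryResponse(contentType string) (bool, error) {
	gvk := schema.GroupVersionKind{
		Group:   "apidiscovery.k8s.io",
		Version: "v2beta1",
		Kind:    "APIGroupDiscoveryList",
	}
	// Returns (true, nil) when the media type is application/json with matching
	// g/v/as parameters; an error only when the content-type string is malformed.
	return discovery.ContentTypeIsGVK(contentType, gvk)
}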
err = json.Unmarshal(body, apiGroupList) if err != nil { @@ -333,26 +334,29 @@ func (d *DiscoveryClient) downloadAPIs() ( return apiGroupList, resourcesByGV, failedGVs, nil } -// isV2Beta1ContentType checks of the content-type string is both -// "application/json" and contains the v2beta1 content-type params. +// ContentTypeIsGVK checks of the content-type string is both +// "application/json" and matches the provided GVK. An error +// is returned if the content type string is malformed. // NOTE: This function is resilient to the ordering of the // content-type parameters, as well as parameters added by // intermediaries such as proxies or gateways. Examples: // -// "application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" = true -// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io" = true -// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8" = true -// "application/json" = false -// "application/json; charset=UTF-8" = false -func isV2Beta1ContentType(contentType string) bool { +// ("application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json", any GVK) = (false, nil) +// ("application/json; charset=UTF-8", any GVK) = (false, nil) +// ("malformed content type string", any GVK) = (false, error) +func ContentTypeIsGVK(contentType string, gvk schema.GroupVersionKind) (bool, error) { base, params, err := mime.ParseMediaType(contentType) if err != nil { - return false + return false, err } - return runtime.ContentTypeJSON == base && - params["g"] == "apidiscovery.k8s.io" && - params["v"] == "v2beta1" && - params["as"] == "APIGroupDiscoveryList" + gvkMatch := runtime.ContentTypeJSON == base && + params["g"] == gvk.Group && + params["v"] == gvk.Version && + params["as"] == gvk.Kind + return gvkMatch, nil } // ServerGroups returns the supported groups, with information like supported versions and the diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go index c78c256ef7..d234db893d 100644 --- a/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -141,7 +141,10 @@ func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { action := testing.ActionImpl{} action.Verb = "get" action.Resource = schema.GroupVersionResource{Resource: "version"} - c.Invokes(action, nil) + _, err := c.Invokes(action, nil) + if err != nil { + return nil, err + } if c.FakedServerVersion != nil { return c.FakedServerVersion, nil diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 9eecbb2a80..6345f2fb62 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -42,6 +42,7 @@ import ( batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" certificatesv1 "k8s.io/client-go/kubernetes/typed/certificates/v1" + certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" certificatesv1beta1 
"k8s.io/client-go/kubernetes/typed/certificates/v1beta1" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" @@ -66,7 +67,7 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - resourcev1alpha1 "k8s.io/client-go/kubernetes/typed/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" @@ -99,6 +100,7 @@ type Interface interface { BatchV1beta1() batchv1beta1.BatchV1beta1Interface CertificatesV1() certificatesv1.CertificatesV1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface @@ -122,7 +124,7 @@ type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface - ResourceV1alpha1() resourcev1alpha1.ResourceV1alpha1Interface + ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface @@ -154,6 +156,7 @@ type Clientset struct { batchV1beta1 *batchv1beta1.BatchV1beta1Client certificatesV1 *certificatesv1.CertificatesV1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client @@ -177,7 +180,7 @@ type Clientset struct { rbacV1 *rbacv1.RbacV1Client rbacV1beta1 *rbacv1beta1.RbacV1beta1Client rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - resourceV1alpha1 *resourcev1alpha1.ResourceV1alpha1Client + resourceV1alpha2 *resourcev1alpha2.ResourceV1alpha2Client schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client @@ -286,6 +289,11 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } +// CertificatesV1alpha1 retrieves the CertificatesV1alpha1Client +func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface { + return c.certificatesV1alpha1 +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 @@ -401,9 +409,9 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } -// ResourceV1alpha1 retrieves the ResourceV1alpha1Client -func (c *Clientset) ResourceV1alpha1() resourcev1alpha1.ResourceV1alpha1Interface { - return c.resourceV1alpha1 +// ResourceV1alpha2 retrieves the ResourceV1alpha2Client +func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { + return 
c.resourceV1alpha2 } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client @@ -560,6 +568,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.certificatesV1alpha1, err = certificatesv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -652,7 +664,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.resourceV1alpha1, err = resourcev1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.resourceV1alpha2, err = resourcev1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -721,6 +733,7 @@ func New(c rest.Interface) *Clientset { cs.batchV1beta1 = batchv1beta1.New(c) cs.certificatesV1 = certificatesv1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) + cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) @@ -744,7 +757,7 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) - cs.resourceV1alpha1 = resourcev1alpha1.New(c) + cs.resourceV1alpha2 = resourcev1alpha2.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go index b272334ad0..9cef4242f2 100644 --- a/vendor/k8s.io/client-go/kubernetes/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. +// Package kubernetes holds packages which implement a clientset for Kubernetes +// APIs. 
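// Illustrative sketch, not part of the vendored change: the clientset call sites this
// bump touches. ResourceV1alpha1() no longer exists and becomes ResourceV1alpha2(),
// and a CertificatesV1alpha1() group client is newly available. The namespace and
// empty list options are placeholders.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func listUpdatedGroups(ctx context.Context, cs kubernetes.Interface) error {
	// Previously: cs.ResourceV1alpha1().ResourceClaims("default").List(...)
	if _, err := cs.ResourceV1alpha2().ResourceClaims("default").List(ctx, metav1.ListOptions{}); err != nil {
		return err
	}
	// New group client wired into the clientset above (certificates.k8s.io/v1alpha1).
	_, err := cs.CertificatesV1alpha1().ClusterTrustBundles().List(ctx, metav1.ListOptions{})
	return err
}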
package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index e43780529b..64d3ce2a7b 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -38,6 +38,7 @@ import ( batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" @@ -62,7 +63,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha1 "k8s.io/api/resource/v1alpha1" + resourcev1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -100,6 +101,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ batchv1beta1.AddToScheme, certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, + certificatesv1alpha1.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -123,7 +125,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - resourcev1alpha1.AddToScheme, + resourcev1alpha2.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go index ba827f3c99..1d994b5abf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -43,6 +43,7 @@ type ValidatingAdmissionPoliciesGetter interface { type ValidatingAdmissionPolicyInterface interface { Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) @@ -50,6 +51,7 @@ type ValidatingAdmissionPolicyInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + ApplyStatus(ctx context.Context, validatingAdmissionPolicy 
*admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } @@ -132,6 +134,21 @@ func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmi return } +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Put(). + Resource("validatingadmissionpolicies"). + Name(validatingAdmissionPolicy.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(validatingAdmissionPolicy). + Do(ctx). + Into(result) + return +} + // Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). @@ -195,3 +212,32 @@ func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmis Into(result) return } + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { + if validatingAdmissionPolicy == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(validatingAdmissionPolicy) + if err != nil { + return nil, err + } + + name := validatingAdmissionPolicy.Name + if name == nil { + return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") + } + + result = &v1alpha1.ValidatingAdmissionPolicy{} + err = c.client.Patch(types.ApplyPatchType). + Resource("validatingadmissionpolicies"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 218cb60c31..7823729e09 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -28,6 +28,7 @@ import ( type AuthenticationV1beta1Interface interface { RESTClient() rest.Interface + SelfSubjectReviewsGetter TokenReviewsGetter } @@ -36,6 +37,10 @@ type AuthenticationV1beta1Client struct { restClient rest.Interface } +func (c *AuthenticationV1beta1Client) SelfSubjectReviews() SelfSubjectReviewInterface { + return newSelfSubjectReviews(c) +} + func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { return newTokenReviews(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go index 60bf15ab99..527a458d74 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -18,4 +18,6 @@ limitations under the License. package v1beta1 +type SelfSubjectReviewExpansion interface{} + type TokenReviewExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go new file mode 100644 index 0000000000..9d54826a31 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/authentication/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. +// A group's client should implement this interface. +type SelfSubjectReviewsGetter interface { + SelfSubjectReviews() SelfSubjectReviewInterface +} + +// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. 
+type SelfSubjectReviewInterface interface { + Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectReview, error) + SelfSubjectReviewExpansion +} + +// selfSubjectReviews implements SelfSubjectReviewInterface +type selfSubjectReviews struct { + client rest.Interface +} + +// newSelfSubjectReviews returns a SelfSubjectReviews +func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews { + return &selfSubjectReviews{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. +func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { + result = &v1beta1.SelfSubjectReview{} + err = c.client.Post(). + Resource("selfsubjectreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(selfSubjectReview). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go new file mode 100644 index 0000000000..a9050af945 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CertificatesV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterTrustBundlesGetter +} + +// CertificatesV1alpha1Client is used to interact with features provided by the certificates.k8s.io group. +type CertificatesV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1alpha1Client) ClusterTrustBundles() ClusterTrustBundleInterface { + return newClusterTrustBundles(c) +} + +// NewForConfig creates a new CertificatesV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CertificatesV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
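// Illustrative sketch, not part of the vendored change: the new authentication/v1beta1
// SelfSubjectReview client added above lets a caller ask the API server who it is
// authenticated as. The wrapper function name is a placeholder.
package example

import (
	"context"

	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func whoAmI(ctx context.Context, cs kubernetes.Interface) (*authenticationv1beta1.SelfSubjectReview, error) {
	// The request body is empty; the server fills in the status with the caller's
	// user info.
	return cs.AuthenticationV1beta1().SelfSubjectReviews().
		Create(ctx, &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{})
}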
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CertificatesV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *CertificatesV1alpha1Client { + return &CertificatesV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go new file mode 100644 index 0000000000..970fb15e6e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterTrustBundlesGetter has a method to return a ClusterTrustBundleInterface. +// A group's client should implement this interface. +type ClusterTrustBundlesGetter interface { + ClusterTrustBundles() ClusterTrustBundleInterface +} + +// ClusterTrustBundleInterface has methods to work with ClusterTrustBundle resources. 
+type ClusterTrustBundleInterface interface { + Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*v1alpha1.ClusterTrustBundle, error) + Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*v1alpha1.ClusterTrustBundle, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTrustBundle, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTrustBundleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) + Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) + ClusterTrustBundleExpansion +} + +// clusterTrustBundles implements ClusterTrustBundleInterface +type clusterTrustBundles struct { + client rest.Interface +} + +// newClusterTrustBundles returns a ClusterTrustBundles +func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles { + return &clusterTrustBundles{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. +func (c *clusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Get(). + Resource("clustertrustbundles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. +func (c *clusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterTrustBundleList{} + err = c.client.Get(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterTrustBundles. +func (c *clusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. +func (c *clusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Post(). + Resource("clustertrustbundles"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(clusterTrustBundle). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. +func (c *clusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Put(). + Resource("clustertrustbundles"). + Name(clusterTrustBundle.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterTrustBundle). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs. +func (c *clusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustertrustbundles"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustertrustbundles"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterTrustBundle. +func (c *clusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Patch(pt). + Resource("clustertrustbundles"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle. +func (c *clusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) { + if clusterTrustBundle == nil { + return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(clusterTrustBundle) + if err != nil { + return nil, err + } + name := clusterTrustBundle.Name + if name == nil { + return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") + } + result = &v1alpha1.ClusterTrustBundle{} + err = c.client.Patch(types.ApplyPatchType). + Resource("clustertrustbundles"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..43cc534b37 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ClusterTrustBundleExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 464fff9116..562f8d5e45 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -82,8 +82,7 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // It returns the copy of the event that the server returns, or an error. // The namespace and name of the target event is deduced from the event. // The namespace must either match this event client's namespace, or this event client must -// -// have been created with the "" namespace. +// have been created with the "" namespace. 
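// Illustrative sketch, not part of the vendored change: server-side apply against the
// ClusterTrustBundle client generated above. The bundle name, signer name, PEM payload
// and field manager are placeholders, and the spec builders (ClusterTrustBundleSpec,
// WithSignerName, WithTrustBundle) are assumed to follow the usual generated naming.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	certapply "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

func applyTrustBundle(ctx context.Context, cs kubernetes.Interface, pem string) error {
	bundle := certapply.ClusterTrustBundle("example-bundle").
		WithSpec(certapply.ClusterTrustBundleSpec().
			WithSignerName("example.com/signer").
			WithTrustBundle(pem))

	// Apply rejects a configuration without a name, as enforced in the generated
	// client above.
	_, err := cs.CertificatesV1alpha1().ClusterTrustBundles().
		Apply(ctx, bundle, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}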
func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { if e.ns != "" && event.Namespace != e.ns { return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 827b514df6..4725d2cd16 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,7 +32,6 @@ type ExtensionsV1beta1Interface interface { DeploymentsGetter IngressesGetter NetworkPoliciesGetter - PodSecurityPoliciesGetter ReplicaSetsGetter } @@ -57,10 +56,6 @@ func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolic return newNetworkPolicies(c, namespace) } -func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index 41d28f0417..67fcf4992b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -24,6 +24,4 @@ type IngressExpansion interface{} type NetworkPolicyExpansion interface{} -type PodSecurityPolicyExpansion interface{} - type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 3f38c3133d..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
-type PodSecurityPolicyInterface interface { - Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) - Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. -func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("podsecuritypolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go index ab41abb7d0..9c2979d6c4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -19,3 +19,5 @@ limitations under the License. 
package v1alpha1 type ClusterCIDRExpansion interface{} + +type IPAddressExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go new file mode 100644 index 0000000000..fff193d68d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// IPAddressesGetter has a method to return a IPAddressInterface. +// A group's client should implement this interface. +type IPAddressesGetter interface { + IPAddresses() IPAddressInterface +} + +// IPAddressInterface has methods to work with IPAddress resources. +type IPAddressInterface interface { + Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (*v1alpha1.IPAddress, error) + Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (*v1alpha1.IPAddress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAddressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) + IPAddressExpansion +} + +// iPAddresses implements IPAddressInterface +type iPAddresses struct { + client rest.Interface +} + +// newIPAddresses returns a IPAddresses +func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses { + return &iPAddresses{ + client: c.RESTClient(), + } +} + +// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. +func (c *iPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Get(). + Resource("ipaddresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. 
+func (c *iPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.IPAddressList{} + err = c.client.Get(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested iPAddresses. +func (c *iPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *iPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Post(). + Resource("ipaddresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAddress). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. +func (c *iPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Put(). + Resource("ipaddresses"). + Name(iPAddress.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPAddress). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. +func (c *iPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("ipaddresses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *iPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ipaddresses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched iPAddress. +func (c *iPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { + result = &v1alpha1.IPAddress{} + err = c.client.Patch(pt). + Resource("ipaddresses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. 
+func (c *iPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) { + if iPAddress == nil { + return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(iPAddress) + if err != nil { + return nil, err + } + name := iPAddress.Name + if name == nil { + return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") + } + result = &v1alpha1.IPAddress{} + err = c.client.Patch(types.ApplyPatchType). + Resource("ipaddresses"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go index ccb5933163..884c846f59 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -29,6 +29,7 @@ import ( type NetworkingV1alpha1Interface interface { RESTClient() rest.Interface ClusterCIDRsGetter + IPAddressesGetter } // NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group. @@ -40,6 +41,10 @@ func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface { return newClusterCIDRs(c) } +func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) +} + // NewForConfig creates a new NetworkingV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go deleted file mode 100644 index e163a84561..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/podscheduling.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/resource/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSchedulingsGetter has a method to return a PodSchedulingInterface. -// A group's client should implement this interface. -type PodSchedulingsGetter interface { - PodSchedulings(namespace string) PodSchedulingInterface -} - -// PodSchedulingInterface has methods to work with PodScheduling resources. 
-type PodSchedulingInterface interface { - Create(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.CreateOptions) (*v1alpha1.PodScheduling, error) - Update(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (*v1alpha1.PodScheduling, error) - UpdateStatus(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (*v1alpha1.PodScheduling, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodScheduling, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodSchedulingList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodScheduling, err error) - Apply(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) - ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) - PodSchedulingExpansion -} - -// podSchedulings implements PodSchedulingInterface -type podSchedulings struct { - client rest.Interface - ns string -} - -// newPodSchedulings returns a PodSchedulings -func newPodSchedulings(c *ResourceV1alpha1Client, namespace string) *podSchedulings { - return &podSchedulings{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podScheduling, and returns the corresponding podScheduling object, and an error if there is any. -func (c *podSchedulings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSchedulings that match those selectors. -func (c *podSchedulings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodSchedulingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PodSchedulingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSchedulings. -func (c *podSchedulings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podScheduling and creates it. Returns the server's representation of the podScheduling, and an error, if there is any. 
-func (c *podSchedulings) Create(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.CreateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podScheduling). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podScheduling and updates it. Returns the server's representation of the podScheduling, and an error, if there is any. -func (c *podSchedulings) Update(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulings"). - Name(podScheduling.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podScheduling). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podSchedulings) UpdateStatus(ctx context.Context, podScheduling *v1alpha1.PodScheduling, opts v1.UpdateOptions) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulings"). - Name(podScheduling.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podScheduling). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podScheduling and deletes it. Returns an error if one occurs. -func (c *podSchedulings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSchedulings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podScheduling. -func (c *podSchedulings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodScheduling, err error) { - result = &v1alpha1.PodScheduling{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podschedulings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podScheduling. 
-func (c *podSchedulings) Apply(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { - if podScheduling == nil { - return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podScheduling) - if err != nil { - return nil, err - } - name := podScheduling.Name - if name == nil { - return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") - } - result = &v1alpha1.PodScheduling{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podSchedulings) ApplyStatus(ctx context.Context, podScheduling *resourcev1alpha1.PodSchedulingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PodScheduling, err error) { - if podScheduling == nil { - return nil, fmt.Errorf("podScheduling provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podScheduling) - if err != nil { - return nil, err - } - - name := podScheduling.Name - if name == nil { - return nil, fmt.Errorf("podScheduling.Name must be provided to Apply") - } - - result = &v1alpha1.PodScheduling{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulings"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go new file mode 100644 index 0000000000..baaf2d9853 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go index df88c2f93b..2c02e9ce74 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go @@ -16,9 +16,9 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 -type PodSchedulingExpansion interface{} +type PodSchedulingContextExpansion interface{} type ResourceClaimExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go new file mode 100644 index 0000000000..72e81a29e3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go @@ -0,0 +1,256 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha2 "k8s.io/api/resource/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. +// A group's client should implement this interface. +type PodSchedulingContextsGetter interface { + PodSchedulingContexts(namespace string) PodSchedulingContextInterface +} + +// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. 
+type PodSchedulingContextInterface interface { + Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha2.PodSchedulingContext, error) + Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) + UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodSchedulingContext, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingContextList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) + Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) + ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) + PodSchedulingContextExpansion +} + +// podSchedulingContexts implements PodSchedulingContextInterface +type podSchedulingContexts struct { + client rest.Interface + ns string +} + +// newPodSchedulingContexts returns a PodSchedulingContexts +func newPodSchedulingContexts(c *ResourceV1alpha2Client, namespace string) *podSchedulingContexts { + return &podSchedulingContexts{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. +func (c *podSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. +func (c *podSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.PodSchedulingContextList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSchedulingContexts. +func (c *podSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. +func (c *podSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. +func (c *podSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(podSchedulingContext.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *podSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(podSchedulingContext.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSchedulingContext). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. +func (c *podSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podschedulingcontexts"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podSchedulingContext. +func (c *podSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. 
+func (c *podSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { + if podSchedulingContext == nil { + return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(podSchedulingContext) + if err != nil { + return nil, err + } + name := podSchedulingContext.Name + if name == nil { + return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") + } + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *podSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { + if podSchedulingContext == nil { + return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(podSchedulingContext) + if err != nil { + return nil, err + } + + name := podSchedulingContext.Name + if name == nil { + return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") + } + + result = &v1alpha2.PodSchedulingContext{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("podschedulingcontexts"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go similarity index 66% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go index 2355bf7ccb..d5795fd628 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resource_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go @@ -16,49 +16,49 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "net/http" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type ResourceV1alpha1Interface interface { +type ResourceV1alpha2Interface interface { RESTClient() rest.Interface - PodSchedulingsGetter + PodSchedulingContextsGetter ResourceClaimsGetter ResourceClaimTemplatesGetter ResourceClassesGetter } -// ResourceV1alpha1Client is used to interact with features provided by the resource.k8s.io group. -type ResourceV1alpha1Client struct { +// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group. 
+type ResourceV1alpha2Client struct { restClient rest.Interface } -func (c *ResourceV1alpha1Client) PodSchedulings(namespace string) PodSchedulingInterface { - return newPodSchedulings(c, namespace) +func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { + return newPodSchedulingContexts(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClaims(namespace string) ResourceClaimInterface { +func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface { return newResourceClaims(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { +func (c *ResourceV1alpha2Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { return newResourceClaimTemplates(c, namespace) } -func (c *ResourceV1alpha1Client) ResourceClasses() ResourceClassInterface { +func (c *ResourceV1alpha2Client) ResourceClasses() ResourceClassInterface { return newResourceClasses(c) } -// NewForConfig creates a new ResourceV1alpha1Client for the given config. +// NewForConfig creates a new ResourceV1alpha2Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*ResourceV1alpha1Client, error) { +func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -70,9 +70,9 @@ func NewForConfig(c *rest.Config) (*ResourceV1alpha1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new ResourceV1alpha1Client for the given config and http client. +// NewForConfigAndClient creates a new ResourceV1alpha2Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -81,12 +81,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha1Cli if err != nil { return nil, err } - return &ResourceV1alpha1Client{client}, nil + return &ResourceV1alpha2Client{client}, nil } -// NewForConfigOrDie creates a new ResourceV1alpha1Client for the given config and +// NewForConfigOrDie creates a new ResourceV1alpha2Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -94,13 +94,13 @@ func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha1Client { return client } -// New creates a new ResourceV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *ResourceV1alpha1Client { - return &ResourceV1alpha1Client{c} +// New creates a new ResourceV1alpha2Client for the given RESTClient. 
+func New(c rest.Interface) *ResourceV1alpha2Client { + return &ResourceV1alpha2Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := v1alpha2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -114,7 +114,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *ResourceV1alpha1Client) RESTClient() rest.Interface { +func (c *ResourceV1alpha2Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go similarity index 79% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go index cd2d0c7821..cfb27c9db6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type ResourceClaimsGetter interface { // ResourceClaimInterface has methods to work with ResourceClaim resources. 
type ResourceClaimInterface interface { - Create(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.CreateOptions) (*v1alpha1.ResourceClaim, error) - Update(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (*v1alpha1.ResourceClaim, error) - UpdateStatus(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (*v1alpha1.ResourceClaim, error) + Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (*v1alpha2.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) + UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClaimList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaim, err error) - Apply(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) - ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) + ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) ResourceClaimExpansion } @@ -62,7 +62,7 @@ type resourceClaims struct { } // newResourceClaims returns a ResourceClaims -func newResourceClaims(c *ResourceV1alpha1Client, namespace string) *resourceClaims { +func newResourceClaims(c *ResourceV1alpha2Client, namespace string) *resourceClaims { return &resourceClaims{ client: c.RESTClient(), ns: namespace, @@ -70,8 +70,8 @@ func newResourceClaims(c *ResourceV1alpha1Client, namespace string) *resourceCla } // Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaims"). 
@@ -83,12 +83,12 @@ func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOpt } // List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimList, err error) { +func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClaimList{} + result = &v1alpha2.ResourceClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaims"). @@ -115,8 +115,8 @@ func (c *resourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch. } // Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.CreateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Post(). Namespace(c.ns). Resource("resourceclaims"). @@ -128,8 +128,8 @@ func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha1.Res } // Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaims"). @@ -143,8 +143,8 @@ func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha1.Res // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha1.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaims"). @@ -185,8 +185,8 @@ func (c *resourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOpt } // Patch applies the patch and returns the patched resourceClaim. 
-func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaim, err error) { - result = &v1alpha1.ResourceClaim{} +func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourceclaims"). @@ -200,7 +200,7 @@ func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchT } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. -func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -213,7 +213,7 @@ func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alp if name == nil { return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaim{} + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaims"). @@ -227,7 +227,7 @@ func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alp // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaim, err error) { +func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { if resourceClaim == nil { return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") } @@ -242,7 +242,7 @@ func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourc return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaim{} + result = &v1alpha2.ResourceClaim{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaims"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go similarity index 80% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go index b6cc3d96ec..3f4e320064 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclaimtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,15 +41,15 @@ type ResourceClaimTemplatesGetter interface { // ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. type ResourceClaimTemplateInterface interface { - Create(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha1.ResourceClaimTemplate, error) - Update(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha1.ResourceClaimTemplate, error) + Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha2.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimTemplate, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClaimTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClaimTemplateList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimTemplateList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaimTemplate, err error) - Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaimTemplate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) ResourceClaimTemplateExpansion } @@ -60,7 +60,7 @@ type resourceClaimTemplates struct { } // newResourceClaimTemplates returns a ResourceClaimTemplates -func newResourceClaimTemplates(c *ResourceV1alpha1Client, namespace string) *resourceClaimTemplates { +func newResourceClaimTemplates(c *ResourceV1alpha2Client, namespace string) *resourceClaimTemplates { return &resourceClaimTemplates{ client: c.RESTClient(), ns: namespace, @@ -68,8 +68,8 @@ func newResourceClaimTemplates(c *ResourceV1alpha1Client, namespace string) *res } // Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. 
-func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -81,12 +81,12 @@ func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v } // List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClaimTemplateList, err error) { +func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClaimTemplateList{} + result = &v1alpha2.ResourceClaimTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -113,8 +113,8 @@ func (c *resourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) } // Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Post(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -126,8 +126,8 @@ func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTempla } // Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha1.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Put(). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -167,8 +167,8 @@ func (c *resourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.D } // Patch applies the patch and returns the patched resourceClaimTemplate. 
-func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClaimTemplate, err error) { - result = &v1alpha1.ResourceClaimTemplate{} +func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourceclaimtemplates"). @@ -182,7 +182,7 @@ func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt type } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClaimTemplate, err error) { +func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { if resourceClaimTemplate == nil { return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") } @@ -195,7 +195,7 @@ func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplat if name == nil { return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") } - result = &v1alpha1.ResourceClaimTemplate{} + result = &v1alpha2.ResourceClaimTemplate{} err = c.client.Patch(types.ApplyPatchType). Namespace(c.ns). Resource("resourceclaimtemplates"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go similarity index 80% rename from vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go rename to vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go index 9c8b454639..95a4ac5668 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha1/resourceclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1alpha1 "k8s.io/api/resource/v1alpha1" + v1alpha2 "k8s.io/api/resource/v1alpha2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha1 "k8s.io/client-go/applyconfigurations/resource/v1alpha1" + resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,15 +41,15 @@ type ResourceClassesGetter interface { // ResourceClassInterface has methods to work with ResourceClass resources. 
type ResourceClassInterface interface { - Create(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.CreateOptions) (*v1alpha1.ResourceClass, error) - Update(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.UpdateOptions) (*v1alpha1.ResourceClass, error) + Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (*v1alpha2.ResourceClass, error) + Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (*v1alpha2.ResourceClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClass, err error) - Apply(ctx context.Context, resourceClass *resourcev1alpha1.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) + Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) ResourceClassExpansion } @@ -59,15 +59,15 @@ type resourceClasses struct { } // newResourceClasses returns a ResourceClasses -func newResourceClasses(c *ResourceV1alpha1Client) *resourceClasses { +func newResourceClasses(c *ResourceV1alpha2Client) *resourceClasses { return &resourceClasses{ client: c.RESTClient(), } } // Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Get(). Resource("resourceclasses"). Name(name). @@ -78,12 +78,12 @@ func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOp } // List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceClassList, err error) { +func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.ResourceClassList{} + result = &v1alpha2.ResourceClassList{} err = c.client.Get(). Resource("resourceclasses"). VersionedParams(&opts, scheme.ParameterCodec). 
@@ -108,8 +108,8 @@ func (c *resourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch } // Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.CreateOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Post(). Resource("resourceclasses"). VersionedParams(&opts, scheme.ParameterCodec). @@ -120,8 +120,8 @@ func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha1.Re } // Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha1.ResourceClass, opts v1.UpdateOptions) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Put(). Resource("resourceclasses"). Name(resourceClass.Name). @@ -158,8 +158,8 @@ func (c *resourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOp } // Patch applies the patch and returns the patched resourceClass. -func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceClass, err error) { - result = &v1alpha1.ResourceClass{} +func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { + result = &v1alpha2.ResourceClass{} err = c.client.Patch(pt). Resource("resourceclasses"). Name(name). @@ -172,7 +172,7 @@ func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.Patch } // Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. -func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha1.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ResourceClass, err error) { +func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { if resourceClass == nil { return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") } @@ -185,7 +185,7 @@ func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1al if name == nil { return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") } - result = &v1alpha1.ResourceClass{} + result = &v1alpha2.ResourceClass{} err = c.client.Patch(types.ApplyPatchType). Resource("resourceclasses"). Name(*name). 
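The hunks above are the mechanical part of the client-go bump: the generated typed client for `resourceclasses` moves from `resource/v1alpha1` to `resource/v1alpha2`. A minimal consumer sketch follows; the `rest.InClusterConfig`/`kubernetes.NewForConfig` plumbing and the `ResourceV1alpha2()` accessor on the clientset are standard generated pieces assumed here rather than shown in this diff.

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listResourceClasses lists the cluster-scoped ResourceClass objects through
// the regenerated v1alpha2 typed client.
func listResourceClasses(ctx context.Context) error {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Same call shape as the old v1alpha1 client; only the group version moved.
	classes, err := cs.ResourceV1alpha2().ResourceClasses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, rc := range classes.Items {
		fmt.Println(rc.Name)
	}
	return nil
}
```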
diff --git a/vendor/k8s.io/client-go/openapi/OWNERS b/vendor/k8s.io/client-go/openapi/OWNERS new file mode 100644 index 0000000000..e610094242 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index 51e34dda33..676d51d321 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -43,7 +43,8 @@ var ( gitMinor string = "" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see - // https://git.k8s.io/community/contributors/design-proposals/release/versioning.md + // https://github.com/kubernetes/sig-release/blob/master/release-engineering/versioning.md#kubernetes-release-versioning + // https://kubernetes.io/releases/version-skew-policy/ // for a detailed discussion of this field) // // TODO: This field is still called "gitVersion" for legacy diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go index 2cf821bcd7..60df7e568c 100644 --- a/vendor/k8s.io/client-go/rest/client.go +++ b/vendor/k8s.io/client-go/rest/client.go @@ -52,8 +52,7 @@ type Interface interface { // ClientContentConfig controls how RESTClient communicates with the server. // // TODO: ContentConfig will be updated to accept a Negotiator instead of a -// -// NegotiatedSerializer and NegotiatedSerializer will be removed. +// NegotiatedSerializer and NegotiatedSerializer will be removed. type ClientContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. // If not set, ContentType will be used to define the Accept header diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 96e725692d..bb6fb4decb 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -481,7 +481,13 @@ func (r *Request) Body(obj interface{}) *Request { return r } -// URL returns the current working URL. +// Error returns any error encountered constructing the request, if any. +func (r *Request) Error() error { + return r.err +} + +// URL returns the current working URL. Check the result of Error() to ensure +// that the returned URL is valid. func (r *Request) URL() *url.URL { p := r.pathPrefix if r.namespaceSet && len(r.namespace) > 0 { @@ -726,7 +732,6 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) retry.After(ctx, r, resp, err) if err == nil && resp.StatusCode == http.StatusOK { return r.newStreamWatcher(resp) @@ -786,22 +791,36 @@ func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) ), nil } -// updateURLMetrics is a convenience function for pushing metrics. -// It also handles corner cases for incomplete/invalid request data. -func updateURLMetrics(ctx context.Context, req *Request, resp *http.Response, err error) { - url := "none" +// updateRequestResultMetric increments the RequestResult metric counter, +// it should be called with the (response, err) tuple from the final +// reply from the server. 
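Besides the metric refactor that starts here, the request.go hunk above adds `Request.Error()` and documents that `URL()` is only meaningful once that error has been checked. A small usage sketch, assuming a standard clientset is available:

```go
package example

import (
	"net/url"

	"k8s.io/client-go/kubernetes"
)

// podsRequestURL builds (but does not send) a GET for core/v1 pods and uses
// the Error()-before-URL() pattern documented above.
func podsRequestURL(cs kubernetes.Interface) (*url.URL, error) {
	req := cs.CoreV1().RESTClient().
		Get().
		Namespace("default").
		Resource("pods")

	// Error() reports anything that went wrong while the request was being
	// assembled; only trust URL() when it returns nil.
	if err := req.Error(); err != nil {
		return nil, err
	}
	return req.URL(), nil
}
```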
+func updateRequestResultMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestResult.Increment(ctx, code, req.verb, host) +} + +// updateRequestRetryMetric increments the RequestRetry metric counter, +// it should be called with the (response, err) tuple for each retry +// except for the final attempt. +func updateRequestRetryMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestRetry.IncrementRetry(ctx, code, req.verb, host) +} + +func sanitize(req *Request, resp *http.Response, err error) (string, string) { + host := "none" if req.c.base != nil { - url = req.c.base.Host + host = req.c.base.Host } // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric // system so we just report them as ``. - if err != nil { - metrics.RequestResult.Increment(ctx, "", req.verb, url) - } else { - // Metrics for failure codes - metrics.RequestResult.Increment(ctx, strconv.Itoa(resp.StatusCode), req.verb, url) + code := "" + if resp != nil { + code = strconv.Itoa(resp.StatusCode) } + + return code, host } // Stream formats and executes the request, and offers streaming of the response. @@ -834,7 +853,6 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { return nil, err } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) retry.After(ctx, r, resp, err) if err != nil { // we only retry on an HTTP response with 'Retry-After' header @@ -979,7 +997,6 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp return err } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) // The value -1 or a value of 0 with a non-nil Body indicates that the length is unknown. // https://pkg.go.dev/net/http#Request if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) { diff --git a/vendor/k8s.io/client-go/rest/with_retry.go b/vendor/k8s.io/client-go/rest/with_retry.go index 207060a5cc..eaaadc6a4c 100644 --- a/vendor/k8s.io/client-go/rest/with_retry.go +++ b/vendor/k8s.io/client-go/rest/with_retry.go @@ -242,8 +242,20 @@ func (r *withRetry) After(ctx context.Context, request *Request, resp *http.Resp // parameters calculated from the (response, err) tuple from // attempt N-1, so r.retryAfter is outdated and should not be // referred to here. + isRetry := r.retryAfter != nil r.retryAfter = nil + // the client finishes a single request after N attempts (1..N) + // - all attempts (1..N) are counted to the rest_client_requests_total + // metric (current behavior). + // - every attempt after the first (2..N) are counted to the + // rest_client_request_retries_total metric. 
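The comment block above defines the new accounting: all attempts 1..N of a request feed the `RequestResult` metric, and attempts 2..N additionally feed the new `RequestRetry` metric. A rough sketch of wiring both counters to application-side metrics follows; the `Increment`/`IncrementRetry` signatures come from the hunks above, but the `RegisterOpts.RequestRetry` field is an assumption about this client-go version and should be verified before use.

```go
package example

import (
	"context"
	"log"

	"k8s.io/client-go/tools/metrics"
)

// attemptCounter implements both metrics.ResultMetric (Increment) and
// metrics.RetryMetric (IncrementRetry), matching the call sites above.
type attemptCounter struct{ name string }

func (c attemptCounter) Increment(_ context.Context, code, verb, host string) {
	log.Printf("%s: code=%s verb=%s host=%s", c.name, code, verb, host)
}

func (c attemptCounter) IncrementRetry(_ context.Context, code, verb, host string) {
	log.Printf("%s retry: code=%s verb=%s host=%s", c.name, code, verb, host)
}

func init() {
	// Assumption: this client-go version exposes a RequestRetry field on
	// RegisterOpts next to RequestResult; older releases only had the latter.
	metrics.Register(metrics.RegisterOpts{
		RequestResult: attemptCounter{name: "rest_client_requests_total"},
		RequestRetry:  attemptCounter{name: "rest_client_request_retries_total"},
	})
}
```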
+ updateRequestResultMetric(ctx, request, resp, err) + if isRetry { + // this is attempt 2 or later + updateRequestRetryMetric(ctx, request, resp, err) + } + if request.c.base != nil { if err != nil { request.backoff.UpdateBackoff(request.URL(), err, 0) @@ -346,8 +358,12 @@ func retryAfterResponse() *http.Response { } func retryAfterResponseWithDelay(delay string) *http.Response { + return retryAfterResponseWithCodeAndDelay(http.StatusInternalServerError, delay) +} + +func retryAfterResponseWithCodeAndDelay(code int, delay string) *http.Response { return &http.Response{ - StatusCode: http.StatusInternalServerError, + StatusCode: code, Header: http.Header{"Retry-After": []string{delay}}, } } diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 96005ff585..f437f28616 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -18,6 +18,7 @@ package cache import ( "errors" + "os" "sync" "time" @@ -50,11 +51,12 @@ type Config struct { Process ProcessFunc // ObjectType is an example object of the type this controller is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // expected to handle. ObjectType runtime.Object + // ObjectDescription is the description to use when logging type-specific information about this controller. + ObjectDescription string + // FullResyncPeriod is the period at which ShouldResync is considered. FullResyncPeriod time.Duration @@ -84,7 +86,7 @@ type Config struct { type ShouldResyncFunc func() bool // ProcessFunc processes a single object. -type ProcessFunc func(obj interface{}) error +type ProcessFunc func(obj interface{}, isInInitialList bool) error // `*controller` implements Controller type controller struct { @@ -131,18 +133,24 @@ func (c *controller) Run(stopCh <-chan struct{}) { <-stopCh c.config.Queue.Close() }() - r := NewReflector( + r := NewReflectorWithOptions( c.config.ListerWatcher, c.config.ObjectType, c.config.Queue, - c.config.FullResyncPeriod, + ReflectorOptions{ + ResyncPeriod: c.config.FullResyncPeriod, + TypeDescription: c.config.ObjectDescription, + Clock: c.clock, + }, ) r.ShouldResync = c.config.ShouldResync r.WatchListPageSize = c.config.WatchListPageSize - r.clock = c.clock if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler } + if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 { + r.UseWatchList = true + } c.reflectorMutex.Lock() c.reflector = r @@ -211,7 +219,7 @@ func (c *controller) processLoop() { // happen if the watch is closed and misses the delete event and we don't // notice the deletion until the subsequent re-list. type ResourceEventHandler interface { - OnAdd(obj interface{}) + OnAdd(obj interface{}, isInInitialList bool) OnUpdate(oldObj, newObj interface{}) OnDelete(obj interface{}) } @@ -220,6 +228,9 @@ type ResourceEventHandler interface { // as few of the notification functions as you want while still implementing // ResourceEventHandler. This adapter does not remove the prohibition against // modifying the objects. +// +// See ResourceEventHandlerDetailedFuncs if your use needs to propagate +// HasSynced. 
type ResourceEventHandlerFuncs struct { AddFunc func(obj interface{}) UpdateFunc func(oldObj, newObj interface{}) @@ -227,7 +238,7 @@ type ResourceEventHandlerFuncs struct { } // OnAdd calls AddFunc if it's not nil. -func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) { if r.AddFunc != nil { r.AddFunc(obj) } @@ -247,6 +258,36 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { } } +// ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs +// except its AddFunc accepts the isInInitialList parameter, for propagating +// HasSynced. +type ResourceEventHandlerDetailedFuncs struct { + AddFunc func(obj interface{}, isInInitialList bool) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) { + if r.AddFunc != nil { + r.AddFunc(obj, isInInitialList) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + // FilteringResourceEventHandler applies the provided filter to all events coming // in, ensuring the appropriate nested handler method is invoked. An object // that starts passing the filter after an update is considered an add, and an @@ -258,11 +299,11 @@ type FilteringResourceEventHandler struct { } // OnAdd calls the nested handler only if the filter succeeds -func (r FilteringResourceEventHandler) OnAdd(obj interface{}) { +func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) { if !r.FilterFunc(obj) { return } - r.Handler.OnAdd(obj) + r.Handler.OnAdd(obj, isInInitialList) } // OnUpdate ensures the proper handler is called depending on whether the filter matches @@ -273,7 +314,7 @@ func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { case newer && older: r.Handler.OnUpdate(oldObj, newObj) case newer && !older: - r.Handler.OnAdd(newObj) + r.Handler.OnAdd(newObj, false) case !newer && older: r.Handler.OnDelete(oldObj) default: @@ -401,6 +442,7 @@ func processDeltas( handler ResourceEventHandler, clientState Store, deltas Deltas, + isInInitialList bool, ) error { // from oldest to newest for _, d := range deltas { @@ -417,7 +459,7 @@ func processDeltas( if err := clientState.Add(obj); err != nil { return err } - handler.OnAdd(obj) + handler.OnAdd(obj, isInInitialList) } case Deleted: if err := clientState.Delete(obj); err != nil { @@ -466,9 +508,9 @@ func newInformer( FullResyncPeriod: resyncPeriod, RetryOnError: false, - Process: func(obj interface{}) error { + Process: func(obj interface{}, isInInitialList bool) error { if deltas, ok := obj.(Deltas); ok { - return processDeltas(h, clientState, deltas) + return processDeltas(h, clientState, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") }, diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 84f3ab9ca1..7160bb1ee7 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -300,6 +300,10 @@ func (f *DeltaFIFO) 
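With `OnAdd` now carrying `isInInitialList`, handlers that want to distinguish the initial LIST replay from live adds can use the `ResourceEventHandlerDetailedFuncs` adapter introduced above. A hedged sketch, assuming the informer and work queue are constructed elsewhere:

```go
package example

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

// registerDetailedHandler wires the detailed adapter into an informer so the
// AddFunc can see whether an object came from the initial list.
func registerDetailedHandler(informer cache.SharedIndexInformer, queue workqueue.Interface) error {
	_, err := informer.AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{
		AddFunc: func(obj interface{}, isInInitialList bool) {
			if isInInitialList {
				// Part of the initial LIST replay; a handler may choose to
				// treat these differently from live adds.
				return
			}
			if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
				queue.Add(key)
			}
		},
		UpdateFunc: func(_, newObj interface{}) {
			if key, err := cache.MetaNamespaceKeyFunc(newObj); err == nil {
				queue.Add(key)
			}
		},
		DeleteFunc: func(obj interface{}) {
			if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
				queue.Add(key)
			}
		},
	})
	return err
}
```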
KeyOf(obj interface{}) (string, error) { func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *DeltaFIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -570,6 +574,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] depth := len(f.queue) @@ -595,7 +600,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"}) defer trace.LogIfLong(100 * time.Millisecond) } - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 8f3313783d..dd13c4ea77 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -25,7 +25,7 @@ import ( // PopProcessFunc is passed to Pop() method of Queue interface. // It is supposed to process the accumulator popped from the queue. -type PopProcessFunc func(interface{}) error +type PopProcessFunc func(obj interface{}, isInInitialList bool) error // ErrRequeue may be returned by a PopProcessFunc to safely requeue // the current item. The value of Err will be returned from Pop. @@ -82,9 +82,12 @@ type Queue interface { // Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. +// +// NOTE: This function is deprecated and may be removed in the future without +// additional warning. func Pop(queue Queue) interface{} { var result interface{} - queue.Pop(func(obj interface{}) error { + queue.Pop(func(obj interface{}, isInInitialList bool) error { result = obj return nil }) @@ -149,6 +152,10 @@ func (f *FIFO) Close() { func (f *FIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *FIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -287,6 +294,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] if f.initialPopulationCount > 0 { @@ -298,7 +306,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { continue } delete(f.items, id) - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 9cd476be8a..2b335c104c 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -23,6 +23,7 @@ import ( "io" "math/rand" "reflect" + "strings" "sync" "time" @@ -40,6 +41,7 @@ import ( "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" "k8s.io/utils/clock" + "k8s.io/utils/pointer" "k8s.io/utils/trace" ) @@ -49,12 +51,11 @@ const defaultExpectedTypeName = "" type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. name string - // The name of the type we expect to place in the store. The name // will be the stringification of expectedGVK if provided, and the // stringification of expectedType otherwise. 
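The `PopProcessFunc` change above flows through `FIFO.Pop` and `DeltaFIFO.Pop`: the callback now also receives `isInInitialList`, computed from `hasSynced_locked()` before the item is handed out. A minimal caller adapted to the two-argument signature (queue construction assumed):

```go
package example

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// drainOne pops a single item using the two-argument PopProcessFunc.
func drainOne(queue cache.Queue) error {
	_, err := queue.Pop(cache.PopProcessFunc(func(obj interface{}, isInInitialList bool) error {
		deltas, ok := obj.(cache.Deltas)
		if !ok {
			return fmt.Errorf("unexpected item of type %T", obj)
		}
		// isInInitialList stays true while the queue is replaying its initial
		// population, which is what lets callers propagate HasSynced.
		fmt.Printf("processing %d deltas (initial=%v)\n", len(deltas), isInInitialList)
		return nil
	}))
	return err
}
```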
It is for display // only, and should not be used for parsing or comparison. - expectedTypeName string + typeDescription string // An example object of the type we expect to place in the store. // Only the type needs to be right, except that when that is // `unstructured.Unstructured` the object's `"apiVersion"` and @@ -66,17 +67,11 @@ type Reflector struct { store Store // listerWatcher is used to perform lists and watches. listerWatcher ListerWatcher - // backoff manages backoff of ListWatch backoffManager wait.BackoffManager // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch. initConnBackoffManager wait.BackoffManager - // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. - MaxInternalErrorRetryDuration time.Duration - - resyncPeriod time.Duration - // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked - ShouldResync func() bool + resyncPeriod time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. @@ -91,6 +86,8 @@ type Reflector struct { isLastSyncResourceVersionUnavailable bool // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex + // Called whenever the ListAndWatch drops the connection with an error. + watchErrorHandler WatchErrorHandler // WatchListPageSize is the requested chunk size of initial and resync watch lists. // If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0") @@ -99,8 +96,19 @@ type Reflector struct { // etcd, which is significantly less efficient and may lead to serious performance and // scalability problems. WatchListPageSize int64 - // Called whenever the ListAndWatch drops the connection with an error. - watchErrorHandler WatchErrorHandler + // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked + ShouldResync func() bool + // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. + MaxInternalErrorRetryDuration time.Duration + // UseWatchList if turned on instructs the reflector to open a stream to bring data from the API server. + // Streaming has the primary advantage of using fewer server's resources to fetch data. + // + // The old behaviour establishes a LIST request which gets data in chunks. + // Paginated list is less efficient and depending on the actual size of objects + // might result in an increased memory consumption of the APIServer. + // + // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details + UseWatchList bool } // ResourceVersionUpdater is an interface that allows store implementation to @@ -131,13 +139,13 @@ func DefaultWatchErrorHandler(r *Reflector, err error) { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. 
- klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case err == io.EOF: // watch closed normally case err == io.ErrUnexpectedEOF: - klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err) default: - utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err)) } } @@ -155,7 +163,40 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa return indexer, reflector } -// NewReflector creates a new Reflector object which will keep the +// NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack +// that is outside this package. See NewReflectorWithOptions for further information. +func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod}) +} + +// NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further +// information. +func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod}) +} + +// ReflectorOptions configures a Reflector. +type ReflectorOptions struct { + // Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line + // in the call stack that is outside this package. + Name string + + // TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted + // using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is + // "". If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields + // are set, the type description is the string encoding of those. Otherwise, the type description is set to the + // go type of expectedType.. + TypeDescription string + + // ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0 + // (do not resync). + ResyncPeriod time.Duration + + // Clock allows tests to control time. If unset defaults to clock.RealClock{} + Clock clock.Clock +} + +// NewReflectorWithOptions creates a new Reflector object which will keep the // given store up to date with the server's contents for the given // resource. Reflector promises to only put things in the store that // have the type of expectedType, unless expectedType is nil. If @@ -165,49 +206,74 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa // "yes". This enables you to use reflectors to periodically process // everything as well as incrementally processing the things that // change. 
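`NewReflector` and `NewNamedReflector` are now thin wrappers around `NewReflectorWithOptions`, with `ReflectorOptions` carrying the name, type description, resync period and clock. A construction sketch, assuming an existing `ListerWatcher` and `Store`:

```go
package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// newPodReflector builds a Reflector through the options-based constructor.
func newPodReflector(lw cache.ListerWatcher, store cache.Store) *cache.Reflector {
	return cache.NewReflectorWithOptions(lw, &v1.Pod{}, store, cache.ReflectorOptions{
		// Name defaults to the caller's file:line when left empty.
		Name: "pod-reflector",
		// TypeDescription is derived from the example object when left empty.
		TypeDescription: "v1.Pod",
		// A zero ResyncPeriod means "do not resync".
		ResyncPeriod: 10 * time.Minute,
	})
}
```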
-func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) -} - -// NewNamedReflector same as NewReflector, but with a specified name for logging -func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - realClock := &clock.RealClock{} +func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector { + reflectorClock := options.Clock + if reflectorClock == nil { + reflectorClock = clock.RealClock{} + } r := &Reflector{ - name: name, - listerWatcher: lw, - store: store, + name: options.Name, + resyncPeriod: options.ResyncPeriod, + typeDescription: options.TypeDescription, + listerWatcher: lw, + store: store, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. - backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - resyncPeriod: resyncPeriod, - clock: realClock, + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + clock: reflectorClock, watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + expectedType: reflect.TypeOf(expectedType), } - r.setExpectedType(expectedType) + + if r.name == "" { + r.name = naming.GetNameFromCallsite(internalPackages...) + } + + if r.typeDescription == "" { + r.typeDescription = getTypeDescriptionFromObject(expectedType) + } + + if r.expectedGVK == nil { + r.expectedGVK = getExpectedGVKFromObject(expectedType) + } + return r } -func (r *Reflector) setExpectedType(expectedType interface{}) { - r.expectedType = reflect.TypeOf(expectedType) - if r.expectedType == nil { - r.expectedTypeName = defaultExpectedTypeName - return +func getTypeDescriptionFromObject(expectedType interface{}) string { + if expectedType == nil { + return defaultExpectedTypeName } - r.expectedTypeName = r.expectedType.String() + reflectDescription := reflect.TypeOf(expectedType).String() - if obj, ok := expectedType.(*unstructured.Unstructured); ok { - // Use gvk to check that watch event objects are of the desired type. 
- gvk := obj.GroupVersionKind() - if gvk.Empty() { - klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name) - return - } - r.expectedGVK = &gvk - r.expectedTypeName = gvk.String() + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return reflectDescription } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return reflectDescription + } + + return gvk.String() +} + +func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind { + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return nil + } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return nil + } + + return &gvk } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common @@ -218,13 +284,13 @@ var internalPackages = []string{"client-go/tools/cache/"} // objects and subsequent deltas. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) wait.BackoffUntil(func() { if err := r.ListAndWatch(stopCh); err != nil { r.watchErrorHandler(r, err) } }, r.backoffManager, true, stopCh) - klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) } var ( @@ -254,42 +320,75 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name) + klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name) + var err error + var w watch.Interface + fallbackToList := !r.UseWatchList - err := r.list(stopCh) - if err != nil { - return err + if r.UseWatchList { + w, err = r.watchList(stopCh) + if w == nil && err == nil { + // stopCh was closed + return nil + } + if err != nil { + if !apierrors.IsInvalid(err) { + return err + } + klog.Warning("the watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantic") + fallbackToList = true + // Ensure that we won't accidentally pass some garbage down the watch. + w = nil + } + } + + if fallbackToList { + err = r.list(stopCh) + if err != nil { + return err + } } resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) defer close(cancelCh) - go func() { - resyncCh, cleanup := r.resyncChan() - defer func() { - cleanup() // Call the last one written into cleanup - }() - for { - select { - case <-resyncCh: - case <-stopCh: - return - case <-cancelCh: + go r.startResync(stopCh, cancelCh, resyncerrc) + return r.watch(w, stopCh, resyncerrc) +} + +// startResync periodically calls r.store.Resync() method. +// Note that this method is blocking and should be +// called in a separate goroutine. 
+func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}, resyncerrc chan error) { + resyncCh, cleanup := r.resyncChan() + defer func() { + cleanup() // Call the last one written into cleanup + }() + for { + select { + case <-resyncCh: + case <-stopCh: + return + case <-cancelCh: + return + } + if r.ShouldResync == nil || r.ShouldResync() { + klog.V(4).Infof("%s: forcing resync", r.name) + if err := r.store.Resync(); err != nil { + resyncerrc <- err return } - if r.ShouldResync == nil || r.ShouldResync() { - klog.V(4).Infof("%s: forcing resync", r.name) - if err := r.store.Resync(); err != nil { - resyncerrc <- err - return - } - } - cleanup() - resyncCh, cleanup = r.resyncChan() } - }() + cleanup() + resyncCh, cleanup = r.resyncChan() + } +} +// watch simply starts a watch request with the server. +func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error { + var err error retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock) + for { // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors select { @@ -298,35 +397,41 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { default: } - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) - options := metav1.ListOptions{ - ResourceVersion: r.LastSyncResourceVersion(), - // We want to avoid situations of hanging watchers. Stop any watchers that do not - // receive any events within the timeout window. - TimeoutSeconds: &timeoutSeconds, - // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. - // Reflector doesn't assume bookmarks are returned at all (if the server do not support - // watch bookmarks, it will ignore this field). - AllowWatchBookmarks: true, - } - // start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent start := r.clock.Now() - w, err := r.listerWatcher.Watch(options) - if err != nil { - // If this is "connection refused" error, it means that most likely apiserver is not responsive. - // It doesn't make sense to re-list all objects because most likely we will be able to restart - // watch where we ended. - // If that's the case begin exponentially backing off and resend watch request. - // Do the same for "429" errors. - if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { - <-r.initConnBackoffManager.Backoff().C() - continue + + if w == nil { + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: r.LastSyncResourceVersion(), + // We want to avoid situations of hanging watchers. Stop any watchers that do not + // receive any events within the timeout window. + TimeoutSeconds: &timeoutSeconds, + // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. + // Reflector doesn't assume bookmarks are returned at all (if the server do not support + // watch bookmarks, it will ignore this field). 
+ AllowWatchBookmarks: true, + } + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(4).Infof("%s: watch of %v returned %v - backing off", r.name, r.typeDescription, err) + select { + case <-stopCh: + return nil + case <-r.initConnBackoffManager.Backoff().C(): + continue + } + } + return err } - return err } - err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh) + err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh) + // Ensure that watch will not be reused across iterations. + w.Stop() + w = nil retry.After(err) if err != nil { if err != errorStopRequested { @@ -335,16 +440,20 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. - klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case apierrors.IsTooManyRequests(err): - klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName) - <-r.initConnBackoffManager.Backoff().C() - continue + klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription) + select { + case <-stopCh: + return nil + case <-r.initConnBackoffManager.Backoff().C(): + continue + } case apierrors.IsInternalError(err) && retry.ShouldRetry(): - klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err) + klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err) continue default: - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err) } } return nil @@ -421,8 +530,8 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { } initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err}) if err != nil { - klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err) - return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err) + klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err) + return fmt.Errorf("failed to list %v: %w", r.typeDescription, err) } // We check if the list was paginated and if so set the paginatedResult based on that. @@ -460,6 +569,114 @@ func (r *Reflector) list(stopCh <-chan struct{}) error { return nil } +// watchList establishes a stream to get a consistent snapshot of data +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal +// +// case 1: start at Most Recent (RV="", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a consistent stream with the server. +// That means the returned data is consistent, as if, served directly from etcd via a quorum read. +// It begins with synthetic "Added" events of all resources up to the most recent ResourceVersion. +// It ends with a synthetic "Bookmark" event containing the most recent ResourceVersion. 
+// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. +// +// case 2: start at Exact (RV>"0", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a stream with the server at the provided resource version. +// To establish the initial state the server begins with synthetic "Added" events. +// It ends with a synthetic "Bookmark" event containing the provided or newer resource version. +// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. +func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { + var w watch.Interface + var err error + var temporaryStore Store + var resourceVersion string + // TODO(#115478): see if this function could be turned + // into a method and see if error handling + // could be unified with the r.watch method + isErrorRetriableWithSideEffectsFn := func(err error) bool { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err) + <-r.initConnBackoffManager.Backoff().C() + return true + } + if isExpiredError(err) || isTooLargeResourceVersionError(err) { + // we tried to re-establish a watch request but the provided RV + // has either expired or it is greater than the server knows about. + // In that case we reset the RV and + // try to get a consistent snapshot from the watch cache (case 1) + r.setIsLastSyncResourceVersionUnavailable(true) + return true + } + return false + } + + initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name}) + defer initTrace.LogIfLong(10 * time.Second) + for { + select { + case <-stopCh: + return nil, nil + default: + } + + resourceVersion = "" + lastKnownRV := r.rewatchResourceVersion() + temporaryStore = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + // TODO(#115478): large "list", slow clients, slow network, p&f + // might slow down streaming and eventually fail. + // maybe in such a case we should retry with an increased timeout? + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: lastKnownRV, + AllowWatchBookmarks: true, + SendInitialEvents: pointer.Bool(true), + ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, + TimeoutSeconds: &timeoutSeconds, + } + start := r.clock.Now() + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + bookmarkReceived := pointer.Bool(false) + err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, + func(rv string) { resourceVersion = rv }, + bookmarkReceived, + r.clock, make(chan error), stopCh) + if err != nil { + w.Stop() // stop and retry with clean state + if err == errorStopRequested { + return nil, nil + } + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + if *bookmarkReceived { + break + } + } + // We successfully got initial state from watch-list confirmed by the + // "k8s.io/initial-events-end" bookmark. 
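The `watchList` path above is only taken when `Reflector.UseWatchList` is set, which `controller.Run` does when the `ENABLE_CLIENT_GO_WATCH_LIST_ALPHA` environment variable is present (see the controller.go hunk earlier in this diff). The sketch below issues the same kind of streaming request directly, using the `SendInitialEvents` list options shown above; it assumes a server with the WatchList feature gate enabled.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/utils/pointer"
)

// openWatchListStream opens the same kind of streaming "watch-list" request
// the reflector builds above: the initial state arrives as synthetic Added
// events, terminated by the "k8s.io/initial-events-end" bookmark.
func openWatchListStream(ctx context.Context, cs kubernetes.Interface) (watch.Interface, error) {
	return cs.CoreV1().Pods("default").Watch(ctx, metav1.ListOptions{
		AllowWatchBookmarks:  true,
		SendInitialEvents:    pointer.Bool(true),
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
	})
}
```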
+ initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) + r.setIsLastSyncResourceVersionUnavailable(false) + if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { + return nil, fmt.Errorf("unable to sync watch-list result: %v", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + + return w, nil +} + // syncWith replaces the store's items with the given list. func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { found := make([]interface{}, 0, len(items)) @@ -478,15 +695,17 @@ func watchHandler(start time.Time, name string, expectedTypeName string, setLastSyncResourceVersion func(string), + exitOnInitialEventsEndBookmark *bool, clock clock.Clock, errc chan error, stopCh <-chan struct{}, ) error { eventCount := 0 - - // Stopping the watcher should be idempotent and if we return from this function there's no way - // we're coming back in with the same watch interface. - defer w.Stop() + if exitOnInitialEventsEndBookmark != nil { + // set it to false just in case somebody + // made it positive + *exitOnInitialEventsEndBookmark = false + } loop: for { @@ -541,6 +760,11 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion + if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok { + if exitOnInitialEventsEndBookmark != nil { + *exitOnInitialEventsEndBookmark = true + } + } default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) } @@ -549,6 +773,11 @@ loop: rvu.UpdateResourceVersion(resourceVersion) } eventCount++ + if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark { + watchDuration := clock.Since(start) + klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration) + return nil + } } } @@ -597,6 +826,18 @@ func (r *Reflector) relistResourceVersion() string { return r.lastSyncResourceVersion } +// rewatchResourceVersion determines the resource version the reflector should start streaming from. +func (r *Reflector) rewatchResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + if r.isLastSyncResourceVersionUnavailable { + // initial stream should return data at the most recent resource version. + // the returned data must be consistent i.e. as if served from etcd via a quorum read + return "" + } + return r.lastSyncResourceVersion +} + // setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned // "expired" or "too large resource version" error. func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) { @@ -635,5 +876,25 @@ func isTooLargeResourceVersionError(err error) bool { return true } } + + // Matches the message returned by api server before 1.17.0 + if strings.Contains(apierr.Status().Message, "Too large resource version") { + return true + } + + return false +} + +// isWatchErrorRetriable determines if it is safe to retry +// a watch error retrieved from the server. +func isWatchErrorRetriable(err error) bool { + // If this is "connection refused" error, it means that most likely apiserver is not responsive. + // It doesn't make sense to re-list all objects because most likely we will be able to restart + // watch where we ended. 
+ // If that's the case begin exponentially backing off and resend watch request. + // Do the same for "429" errors. + if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { + return true + } return false } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 4979642ce1..a889fdbc36 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache/synctrack" "k8s.io/utils/buffer" "k8s.io/utils/clock" @@ -132,11 +133,13 @@ import ( // state, except that its ResourceVersion is replaced with a // ResourceVersion in which the object is actually absent. type SharedInformer interface { - // AddEventHandler adds an event handler to the shared informer using the shared informer's resync - // period. Events to a single handler are delivered sequentially, but there is no coordination - // between different handlers. - // It returns a registration handle for the handler that can be used to remove - // the handler again. + // AddEventHandler adds an event handler to the shared informer using + // the shared informer's resync period. Events to a single handler are + // delivered sequentially, but there is no coordination between + // different handlers. + // It returns a registration handle for the handler that can be used to + // remove the handler again, or to tell if the handler is synced (has + // seen every item in the initial list). AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) // AddEventHandlerWithResyncPeriod adds an event handler to the // shared informer with the requested resync period; zero means @@ -169,6 +172,10 @@ type SharedInformer interface { // HasSynced returns true if the shared informer's store has been // informed by at least one full LIST of the authoritative state // of the informer's object collection. This is unrelated to "resync". + // + // Note that this doesn't tell you if an individual handler is synced!! + // For that, please call HasSynced on the handle returned by + // AddEventHandler. HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -210,7 +217,14 @@ type SharedInformer interface { // Opaque interface representing the registration of ResourceEventHandler for // a SharedInformer. Must be supplied back to the same SharedInformer's // `RemoveEventHandler` to unregister the handlers. -type ResourceEventHandlerRegistration interface{} +// +// Also used to tell if the handler is synced (has had all items in the initial +// list delivered). +type ResourceEventHandlerRegistration interface { + // HasSynced reports if both the parent has synced and all pre-sync + // events have been delivered. + HasSynced() bool +} // SharedIndexInformer provides add and get Indexers ability based on SharedInformer. type SharedIndexInformer interface { @@ -220,14 +234,26 @@ type SharedIndexInformer interface { GetIndexer() Indexer } -// NewSharedInformer creates a new instance for the listwatcher. +// NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details. 
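`ResourceEventHandlerRegistration` is no longer an empty interface: the handle returned by `AddEventHandler` now exposes `HasSynced`, which is per handler and stricter than the informer-level `HasSynced`. A short sketch of waiting on it, assuming an already running informer:

```go
package example

import (
	"context"
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// waitForHandler registers a handler and waits until that specific handler
// has seen every item from the initial list.
func waitForHandler(ctx context.Context, informer cache.SharedIndexInformer) error {
	reg, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* handle add */ },
	})
	if err != nil {
		return err
	}
	// reg.HasSynced is stricter than informer.HasSynced: it also requires
	// that this handler's pre-sync notifications have all been delivered.
	if !cache.WaitForCacheSync(ctx.Done(), reg.HasSynced) {
		return fmt.Errorf("handler never synced")
	}
	return nil
}
```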
func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer { return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{}) } -// NewSharedIndexInformer creates a new instance for the listwatcher. -// The created informer will not do resyncs if the given -// defaultEventHandlerResyncPeriod is zero. Otherwise: for each +// NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See +// NewSharedIndexInformerWithOptions for full details. +func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + return NewSharedIndexInformerWithOptions( + lw, + exampleObject, + SharedIndexInformerOptions{ + ResyncPeriod: defaultEventHandlerResyncPeriod, + Indexers: indexers, + }, + ) +} + +// NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher. +// The created informer will not do resyncs if options.ResyncPeriod is zero. Otherwise: for each // handler that with a non-zero requested resync period, whether added // before or after the informer starts, the nominal resync period is // the requested resync period rounded up to a multiple of the @@ -235,21 +261,36 @@ func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEv // checking period is established when the informer starts running, // and is the maximum of (a) the minimum of the resync periods // requested before the informer starts and the -// defaultEventHandlerResyncPeriod given here and (b) the constant +// options.ResyncPeriod given here and (b) the constant // `minimumResyncPeriod` defined in this file. -func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { +func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer { realClock := &clock.RealClock{} - sharedIndexInformer := &sharedIndexInformer{ + + return &sharedIndexInformer{ + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers), processor: &sharedProcessor{clock: realClock}, - indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), listerWatcher: lw, objectType: exampleObject, - resyncCheckPeriod: defaultEventHandlerResyncPeriod, - defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, - cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), + objectDescription: options.ObjectDescription, + resyncCheckPeriod: options.ResyncPeriod, + defaultEventHandlerResyncPeriod: options.ResyncPeriod, clock: realClock, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), } - return sharedIndexInformer +} + +// SharedIndexInformerOptions configures a sharedIndexInformer. +type SharedIndexInformerOptions struct { + // ResyncPeriod is the default event handler resync period and resync check + // period. If unset/unspecified, these are defaulted to 0 (do not resync). + ResyncPeriod time.Duration + + // Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured. + Indexers Indexers + + // ObjectDescription is the sharedIndexInformer's object description. This is passed through to the + // underlying Reflector's type description. 
+ ObjectDescription string } // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. @@ -323,12 +364,13 @@ type sharedIndexInformer struct { listerWatcher ListerWatcher - // objectType is an example object of the type this informer is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // objectType is an example object of the type this informer is expected to handle. If set, an event + // with an object with a mismatching type is dropped instead of being delivered to listeners. objectType runtime.Object + // objectDescription is the description of this informer's objects. This typically defaults to + objectDescription string + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call // shouldResync to check if any of our listeners need a resync. resyncCheckPeriod time.Duration @@ -378,7 +420,8 @@ type updateNotification struct { } type addNotification struct { - newObj interface{} + newObj interface{} + isInInitialList bool } type deleteNotification struct { @@ -423,12 +466,13 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { }) cfg := &Config{ - Queue: fifo, - ListerWatcher: s.listerWatcher, - ObjectType: s.objectType, - FullResyncPeriod: s.resyncCheckPeriod, - RetryOnError: false, - ShouldResync: s.processor.shouldResync, + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + ObjectDescription: s.objectDescription, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, Process: s.HandleDeltas, WatchErrorHandler: s.watchErrorHandler, @@ -557,7 +601,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv } } - listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced) if !s.started { return s.processor.addListener(listener), nil @@ -573,27 +617,35 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv handle := s.processor.addListener(listener) for _, item := range s.indexer.List() { - listener.add(addNotification{newObj: item}) + // Note that we enqueue these notifications with the lock held + // and before returning the handle. That means there is never a + // chance for anyone to call the handle's HasSynced method in a + // state when it would falsely return true (i.e., when the + // shared informer is synced but it has not observed an Add + // with isInitialList being true, nor when the thread + // processing notifications somehow goes faster than this + // thread adding them and the counter is temporarily zero). 
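`NewSharedIndexInformer` now delegates to `NewSharedIndexInformerWithOptions`, and `SharedIndexInformerOptions` bundles the resync period, indexers and the new `ObjectDescription`. A hedged construction sketch, assuming a `ListerWatcher` built elsewhere:

```go
package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// newPodInformer constructs a shared index informer via the new options struct.
func newPodInformer(lw cache.ListerWatcher) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformerWithOptions(lw, &v1.Pod{}, cache.SharedIndexInformerOptions{
		ResyncPeriod: 30 * time.Second,
		Indexers: cache.Indexers{
			cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
		},
		// ObjectDescription flows through to the reflector's type description
		// used in log lines.
		ObjectDescription: "core/v1 pods",
	})
}
```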
+ listener.add(addNotification{newObj: item, isInInitialList: true}) } return handle, nil } -func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { +func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error { s.blockDeltas.Lock() defer s.blockDeltas.Unlock() if deltas, ok := obj.(Deltas); ok { - return processDeltas(s, s.indexer, deltas) + return processDeltas(s, s.indexer, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") } // Conforms to ResourceEventHandler -func (s *sharedIndexInformer) OnAdd(obj interface{}) { +func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) { // Invocation of this function is locked under s.blockDeltas, so it is // save to distribute the notification s.cacheMutationDetector.AddObject(obj) - s.processor.distribute(addNotification{newObj: obj}, false) + s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false) } // Conforms to ResourceEventHandler @@ -815,6 +867,8 @@ type processorListener struct { handler ResourceEventHandler + syncTracker *synctrack.SingleFileTracker + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications // added until we OOM. @@ -845,11 +899,18 @@ type processorListener struct { resyncLock sync.Mutex } -func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { +// HasSynced returns true if the source informer has synced, and all +// corresponding events have been delivered. +func (p *processorListener) HasSynced() bool { + return p.syncTracker.HasSynced() +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int, hasSynced func() bool) *processorListener { ret := &processorListener{ nextCh: make(chan interface{}), addCh: make(chan interface{}), handler: handler, + syncTracker: &synctrack.SingleFileTracker{UpstreamHasSynced: hasSynced}, pendingNotifications: *buffer.NewRingGrowing(bufferSize), requestedResyncPeriod: requestedResyncPeriod, resyncPeriod: resyncPeriod, @@ -861,6 +922,9 @@ func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, res } func (p *processorListener) add(notification interface{}) { + if a, ok := notification.(addNotification); ok && a.isInInitialList { + p.syncTracker.Start() + } p.addCh <- notification } @@ -906,7 +970,10 @@ func (p *processorListener) run() { case updateNotification: p.handler.OnUpdate(notification.oldObj, notification.newObj) case addNotification: - p.handler.OnAdd(notification.newObj) + p.handler.OnAdd(notification.newObj, notification.isInInitialList) + if notification.isInInitialList { + p.syncTracker.Finished() + } case deleteNotification: p.handler.OnDelete(notification.oldObj) default: diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go new file mode 100644 index 0000000000..ce51da9af3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synctrack + +import ( + "sync" + "sync/atomic" +) + +// Lazy defers the computation of `Evaluate` to when it is necessary. It is +// possible that Evaluate will be called in parallel from multiple goroutines. +type Lazy[T any] struct { + Evaluate func() (T, error) + + cache atomic.Pointer[cacheEntry[T]] +} + +type cacheEntry[T any] struct { + eval func() (T, error) + lock sync.RWMutex + result *T +} + +func (e *cacheEntry[T]) get() (T, error) { + if cur := func() *T { + e.lock.RLock() + defer e.lock.RUnlock() + return e.result + }(); cur != nil { + return *cur, nil + } + + e.lock.Lock() + defer e.lock.Unlock() + if e.result != nil { + return *e.result, nil + } + r, err := e.eval() + if err == nil { + e.result = &r + } + return r, err +} + +func (z *Lazy[T]) newCacheEntry() *cacheEntry[T] { + return &cacheEntry[T]{eval: z.Evaluate} +} + +// Notify should be called when something has changed necessitating a new call +// to Evaluate. +func (z *Lazy[T]) Notify() { z.cache.Swap(z.newCacheEntry()) } + +// Get should be called to get the current result of a call to Evaluate. If the +// current cached value is stale (due to a call to Notify), then Evaluate will +// be called synchronously. If subsequent calls to Get happen (without another +// Notify), they will all wait for the same return value. +// +// Error returns are not cached and will cause multiple calls to evaluate! +func (z *Lazy[T]) Get() (T, error) { + e := z.cache.Load() + if e == nil { + // Since we don't force a constructor, nil is a possible value. + // If multiple Gets race to set this, the swap makes sure only + // one wins. + z.cache.CompareAndSwap(nil, z.newCacheEntry()) + e = z.cache.Load() + } + return e.get() +} diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go new file mode 100644 index 0000000000..3fa2beb6b7 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go @@ -0,0 +1,120 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package synctrack contains utilities for helping controllers track whether +// they are "synced" or not, that is, whether they have processed all items +// from the informer's initial list. +package synctrack + +import ( + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// AsyncTracker helps propagate HasSynced in the face of multiple worker threads. 
+type AsyncTracker[T comparable] struct { + UpstreamHasSynced func() bool + + lock sync.Mutex + waiting sets.Set[T] +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *AsyncTracker[T]) Start(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting == nil { + t.waiting = sets.New[T](key) + } else { + t.waiting.Insert(key) + } +} + +// Finished should be called when finished processing a key which was part of +// the initial list. Since keys are tracked individually, nothing bad happens +// if you call Finished without a corresponding call to Start. This makes it +// easier to use this in combination with e.g. queues which don't make it easy +// to plumb through the isInInitialList boolean. +func (t *AsyncTracker[T]) Finished(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting != nil { + t.waiting.Delete(key) + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *AsyncTracker[T]) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we can't hold our lock while + // waiting on that or a user is likely to get a deadlock. + if !t.UpstreamHasSynced() { + return false + } + t.lock.Lock() + defer t.lock.Unlock() + return t.waiting.Len() == 0 +} + +// SingleFileTracker helps propagate HasSynced when events are processed in +// order (i.e. via a queue). +type SingleFileTracker struct { + // Important: count is used with atomic operations so it must be 64-bit + // aligned, otherwise atomic operations will panic. Having it at the top of + // the struct will guarantee that, even on 32-bit arches. + // See https://pkg.go.dev/sync/atomic#pkg-note-BUG for more information. + count int64 + + UpstreamHasSynced func() bool +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *SingleFileTracker) Start() { + atomic.AddInt64(&t.count, 1) +} + +// Finished should be called when finished processing a key which was part of +// the initial list. You must never call Finished() before (or without) its +// corresponding Start(), that is a logic error that could cause HasSynced to +// return a wrong value. To help you notice this should it happen, Finished() +// will panic if the internal counter goes negative. +func (t *SingleFileTracker) Finished() { + result := atomic.AddInt64(&t.count, -1) + if result < 0 { + panic("synctrack: negative counter; this logic error means HasSynced may return incorrect value") + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *SingleFileTracker) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we don't want to then act on a + // stale count value. 
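Both trackers expose the same Start/Finished/HasSynced protocol: SingleFileTracker fits strictly in-order processing (this is how the shared informer's processorListener uses it above), while AsyncTracker keys the bookkeeping per item so it can sit in front of a work queue with multiple workers. A hedged sketch of the AsyncTracker pattern; the controller type, key handling, and upstream sync function are placeholders:

```go
package example // illustrative sketch, not part of this vendor drop

import (
	"k8s.io/client-go/tools/cache/synctrack"
)

type controller struct {
	tracker synctrack.AsyncTracker[string]
	// work queue, listers, etc. elided
}

func newController(upstreamHasSynced func() bool) *controller {
	return &controller{
		tracker: synctrack.AsyncTracker[string]{UpstreamHasSynced: upstreamHasSynced},
	}
}

// onAdd would be wired to the informer's OnAdd (key extraction elided): keys
// from the initial list are marked pending before they are queued for workers.
func (c *controller) onAdd(key string, isInInitialList bool) {
	if isInInitialList {
		c.tracker.Start(key)
	}
	// enqueue key ...
}

// processKey runs in a worker. Finished is safe even for keys that never went
// through Start, so the queue does not need to carry the initial-list flag.
func (c *controller) processKey(key string) {
	defer c.tracker.Finished(key)
	// reconcile key ...
}

// HasSynced can be handed to cache.WaitForCacheSync alongside the informer's.
func (c *controller) HasSynced() bool { return c.tracker.HasSynced() }
```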
+ if !t.UpstreamHasSynced() { + return false + } + return atomic.LoadInt64(&t.count) <= 0 +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index c64ba9b26b..940e716175 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -64,9 +64,8 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" rl "k8s.io/client-go/tools/leaderelection/resourcelock" - "k8s.io/utils/clock" - "k8s.io/klog/v2" + "k8s.io/utils/clock" ) const ( @@ -199,9 +198,7 @@ type LeaderElector struct { // stopped holding the leader lease func (le *LeaderElector) Run(ctx context.Context) { defer runtime.HandleCrash() - defer func() { - le.config.Callbacks.OnStoppedLeading() - }() + defer le.config.Callbacks.OnStoppedLeading() if !le.acquire(ctx) { return // ctx signalled done @@ -263,6 +260,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { // renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done. func (le *LeaderElector) renew(ctx context.Context) { + defer le.config.Lock.RecordEvent("stopped leading") ctx, cancel := context.WithCancel(ctx) defer cancel() wait.Until(func() { @@ -278,7 +276,6 @@ func (le *LeaderElector) renew(ctx context.Context) { klog.V(5).Infof("successfully renewed lease %v", desc) return } - le.config.Lock.RecordEvent("stopped leading") le.metrics.leaderOff(le.config.Name) klog.Infof("failed to renew lease %v: %v", desc, err) cancel() @@ -295,7 +292,7 @@ func (le *LeaderElector) release() bool { if !le.IsLeader() { return true } - now := metav1.Now() + now := metav1.NewTime(le.clock.Now()) leaderElectionRecord := rl.LeaderElectionRecord{ LeaderTransitions: le.observedRecord.LeaderTransitions, LeaseDurationSeconds: 1, @@ -315,7 +312,7 @@ func (le *LeaderElector) release() bool { // else it tries to renew the lease if it has already been acquired. Returns true // on success else returns false. 
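The renewal change above makes a non-leader judge lock expiry by the LeaseDurationSeconds advertised in the observed record rather than by its own configured LeaseDuration, and the "stopped leading" event is now recorded once when the renew loop exits instead of on each failed renewal. For reference, a hedged sketch of the Lease-based setup those durations feed into; the client, identity, and lease coordinates are placeholders:

```go
package example // illustrative sketch, not part of this vendor drop

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func runWithLeaderElection(ctx context.Context, client kubernetes.Interface, identity string) {
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Namespace: "default", Name: "example-controller"}, // placeholders
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: identity},
	}
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		LeaseDuration:   15 * time.Second, // what this candidate advertises while leading
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		ReleaseOnCancel: true,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { /* start work */ },
			OnStoppedLeading: func() { /* stop work */ },
		},
	})
}
```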
func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { - now := metav1.Now() + now := metav1.NewTime(le.clock.Now()) leaderElectionRecord := rl.LeaderElectionRecord{ HolderIdentity: le.config.Lock.Identity(), LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), @@ -347,7 +344,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { le.observedRawRecord = oldLeaderElectionRawRecord } if len(oldLeaderElectionRecord.HolderIdentity) > 0 && - le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && + le.observedTime.Add(time.Second*time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).After(now.Time) && !le.IsLeader() { klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index c6e23bda16..05b5b20237 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -68,7 +68,7 @@ const ( // name: '*' // namespace: kube-system EndpointsLeasesResourceLock = "endpointsleases" - // When using EndpointsLeasesResourceLock, you need to ensure that + // When using ConfigMapsLeasesResourceLock, you need to ensure that // API Priority & Fairness is configured with non-default flow-schema // that will catch the necessary operations on leader-election related // configmap objects. diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 185ef0e500..8a9d7d60f2 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -117,10 +117,10 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec r.LeaderTransitions = int(*spec.LeaseTransitions) } if spec.AcquireTime != nil { - r.AcquireTime = metav1.Time{spec.AcquireTime.Time} + r.AcquireTime = metav1.Time{Time: spec.AcquireTime.Time} } if spec.RenewTime != nil { - r.RenewTime = metav1.Time{spec.RenewTime.Time} + r.RenewTime = metav1.Time{Time: spec.RenewTime.Time} } return &r @@ -132,8 +132,8 @@ func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.L return coordinationv1.LeaseSpec{ HolderIdentity: &ler.HolderIdentity, LeaseDurationSeconds: &leaseDurationSeconds, - AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time}, - RenewTime: &metav1.MicroTime{ler.RenewTime.Time}, + AcquireTime: &metav1.MicroTime{Time: ler.AcquireTime.Time}, + RenewTime: &metav1.MicroTime{Time: ler.RenewTime.Time}, LeaseTransitions: &leaseTransitions, } } diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go index 6c684c7fa1..f36430dc3e 100644 --- a/vendor/k8s.io/client-go/tools/metrics/metrics.go +++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -58,6 +58,12 @@ type CallsMetric interface { Increment(exitCode int, callStatus string) } +// RetryMetric counts the number of retries sent to the server +// partitioned by code, method, and host. 
+type RetryMetric interface { + IncrementRetry(ctx context.Context, code string, method string, host string) +} + var ( // ClientCertExpiry is the expiry time of a client certificate ClientCertExpiry ExpiryMetric = noopExpiry{} @@ -76,6 +82,9 @@ var ( // ExecPluginCalls is the number of calls made to an exec plugin, partitioned by // exit code and call status. ExecPluginCalls CallsMetric = noopCalls{} + // RequestRetry is the retry metric that tracks the number of + // retries sent to the server. + RequestRetry RetryMetric = noopRetry{} ) // RegisterOpts contains all the metrics to register. Metrics may be nil. @@ -88,6 +97,7 @@ type RegisterOpts struct { RateLimiterLatency LatencyMetric RequestResult ResultMetric ExecPluginCalls CallsMetric + RequestRetry RetryMetric } // Register registers metrics for the rest client to use. This can @@ -118,6 +128,9 @@ func Register(opts RegisterOpts) { if opts.ExecPluginCalls != nil { ExecPluginCalls = opts.ExecPluginCalls } + if opts.RequestRetry != nil { + RequestRetry = opts.RequestRetry + } }) } @@ -144,3 +157,7 @@ func (noopResult) Increment(context.Context, string, string, string) {} type noopCalls struct{} func (noopCalls) Increment(int, string) {} + +type noopRetry struct{} + +func (noopRetry) IncrementRetry(context.Context, string, string, string) {} diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 926605975f..1582e8ee7d 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -17,6 +17,7 @@ limitations under the License. package record import ( + "context" "fmt" "math/rand" "time" @@ -132,7 +133,9 @@ type EventBroadcaster interface { // with the event source set to the given event source. NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder - // Shutdown shuts down the broadcaster + // Shutdown shuts down the broadcaster. Once the broadcaster is shut + // down, it will only try to record an event in a sink once before + // giving up on it with an error message. Shutdown() } @@ -157,31 +160,34 @@ func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, re // Creates a new event broadcaster. 
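RequestRetry slots into the existing registration flow: implement the one-method interface and hand it to metrics.Register once at startup, otherwise the no-op implementation stays in place. A minimal sketch with an atomic counter; the implementation is illustrative, not something client-go provides:

```go
package example // illustrative sketch, not part of this vendor drop

import (
	"context"
	"sync/atomic"

	"k8s.io/client-go/tools/metrics"
)

// retryCounter is a toy RetryMetric that only counts retries; a real
// implementation would typically export a counter labeled by code, method,
// and host.
type retryCounter struct{ total int64 }

func (r *retryCounter) IncrementRetry(_ context.Context, code, method, host string) {
	atomic.AddInt64(&r.total, 1)
}

func init() {
	metrics.Register(metrics.RegisterOpts{
		RequestRetry: &retryCounter{},
	})
}
```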
func NewBroadcaster() EventBroadcaster { - return &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: defaultSleepDuration, - } + return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) } func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: sleepDuration, - } + return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration) } func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { - return &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: defaultSleepDuration, - options: options, + eventBroadcaster := newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) + eventBroadcaster.options = options + return eventBroadcaster +} + +func newEventBroadcaster(broadcaster *watch.Broadcaster, sleepDuration time.Duration) *eventBroadcasterImpl { + eventBroadcaster := &eventBroadcasterImpl{ + Broadcaster: broadcaster, + sleepDuration: sleepDuration, } + eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(context.Background()) + return eventBroadcaster } type eventBroadcasterImpl struct { *watch.Broadcaster - sleepDuration time.Duration - options CorrelatorOptions + sleepDuration time.Duration + options CorrelatorOptions + cancelationCtx context.Context + cancel func() } // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. @@ -191,15 +197,16 @@ func (e *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interf eventCorrelator := NewEventCorrelatorWithOptions(e.options) return e.StartEventWatcher( func(event *v1.Event) { - recordToSink(sink, event, eventCorrelator, e.sleepDuration) + e.recordToSink(sink, event, eventCorrelator) }) } func (e *eventBroadcasterImpl) Shutdown() { e.Broadcaster.Shutdown() + e.cancel() } -func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, sleepDuration time.Duration) { +func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator) { // Make a copy before modification, because there could be multiple listeners. // Events are safe to copy like this. eventCopy := *event @@ -221,12 +228,18 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) break } + // Randomize the first sleep so that various clients won't all be // synced up if the master goes down. 
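With the cancellation context in place, Shutdown does more than close the watch channel: a recordToSink call that is sleeping between retries observes cancelationCtx.Done() and gives up instead of blocking. A hedged sketch of the broadcaster lifecycle this protects; the clientset, object, and component name are placeholders:

```go
package example // illustrative sketch, not part of this vendor drop

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

func emitEvents(client kubernetes.Interface, obj *corev1.Pod) {
	broadcaster := record.NewBroadcaster()
	// Shutdown cancels the broadcaster's context, so sink retries that are
	// sleeping in recordToSink return instead of delaying process exit.
	defer broadcaster.Shutdown()

	watcher := broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	defer watcher.Stop()

	recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "example-controller"})
	recorder.Event(obj, corev1.EventTypeNormal, "Example", "illustrative event")
}
```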
+ delay := e.sleepDuration if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * rand.Float64())) - } else { - time.Sleep(sleepDuration) + delay = time.Duration(float64(delay) * rand.Float64()) + } + select { + case <-e.cancelationCtx.Done(): + klog.Errorf("Unable to write event '%#v' (broadcaster is shut down)", event) + return + case <-time.After(delay): } } } diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go index 0b3f344a97..fda4ad8ff8 100644 --- a/vendor/k8s.io/client-go/tools/record/fake.go +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -41,20 +41,31 @@ func objectString(object runtime.Object, includeObject bool) string { ) } -func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { - if f.Events != nil { - f.Events <- fmt.Sprintf("%s %s %s%s", eventtype, reason, message, objectString(object, f.IncludeObject)) +func annotationsString(annotations map[string]string) string { + if len(annotations) == 0 { + return "" + } else { + return " " + fmt.Sprint(annotations) } } -func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { +func (f *FakeRecorder) writeEvent(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { if f.Events != nil { - f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + objectString(object, f.IncludeObject) + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + + objectString(object, f.IncludeObject) + annotationsString(annotations) } } +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + f.writeEvent(object, nil, eventtype, reason, "%s", message) +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + f.writeEvent(object, nil, eventtype, reason, messageFmt, args...) +} + func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - f.Eventf(object, eventtype, reason, messageFmt, args...) + f.writeEvent(object, annotations, eventtype, reason, messageFmt, args...) 
} // NewFakeRecorder creates new fake event recorder with event channel with diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 4ef02f09fe..91e171271a 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -213,7 +213,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a if err := os.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil { return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err) } - if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil { + if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0600); err != nil { return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err) } } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 26eacc2ba7..c1df720302 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -33,38 +33,81 @@ type DelayingInterface interface { AddAfter(item interface{}, duration time.Duration) } +// DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type DelayingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // Queue optionally allows injecting custom queue Interface instead of the default one. + Queue Interface +} + // NewDelayingQueue constructs a new workqueue with delayed queuing ability. // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use -// NewNamedDelayingQueue instead. +// NewDelayingQueueWithConfig instead and specify a name. func NewDelayingQueue() DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, "") + return NewDelayingQueueWithConfig(DelayingQueueConfig{}) +} + +// NewDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.Queue == nil { + config.Queue = NewWithConfig(QueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + + return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider) } // NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to // inject custom queue Interface instead of the default one +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface { - return newDelayingQueue(clock.RealClock{}, q, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Queue: q, + }) } -// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability +// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability. +// Deprecated: Use NewDelayingQueueWithConfig instead. 
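The per-option constructors are funneled into DelayingQueueConfig, so test clock injection moves from NewDelayingQueueWithCustomClock to the Clock field. A small sketch, assuming k8s.io/utils/clock/testing is available to the test:

```go
package example // illustrative sketch, not part of this vendor drop

import (
	"time"

	"k8s.io/client-go/util/workqueue"
	testingclock "k8s.io/utils/clock/testing"
)

func newTestDelayingQueue() (workqueue.DelayingInterface, *testingclock.FakeClock) {
	fakeClock := testingclock.NewFakeClock(time.Now())
	q := workqueue.NewDelayingQueueWithConfig(workqueue.DelayingQueueConfig{
		Name:  "test",
		Clock: fakeClock, // replaces NewDelayingQueueWithCustomClock(fakeClock, "test")
	})
	return q, fakeClock
}
```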
func NewNamedDelayingQueue(name string) DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{Name: name}) } // NewDelayingQueueWithCustomClock constructs a new named workqueue -// with ability to inject real or fake clock for testing purposes +// with ability to inject real or fake clock for testing purposes. +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface { - return newDelayingQueue(clock, NewNamed(name), name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Clock: clock, + }) } -func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType { +func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType { ret := &delayingType{ Interface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), + metrics: newRetryMetrics(name, provider), } go ret.waitingLoop() diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index 4b0a69616d..f012ccc554 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -244,13 +244,18 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu } } -func newRetryMetrics(name string) retryMetrics { +func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { var ret *defaultRetryMetrics if len(name) == 0 { return ret } + + if provider == nil { + provider = globalMetricsFactory.metricsProvider + } + return &defaultRetryMetrics{ - retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name), + retries: provider.NewRetriesMetric(name), } } diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index 6f7063269f..380c064552 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -33,17 +33,60 @@ type Interface interface { ShuttingDown() bool } +// QueueConfig specifies optional configurations to customize an Interface. +type QueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock ability to inject real or fake clock for testing purposes. + Clock clock.WithTicker +} + // New constructs a new work queue (see the package comment). func New() *Type { - return NewNamed("") + return NewWithConfig(QueueConfig{ + Name: "", + }) } +// NewWithConfig constructs a new workqueue with ability to +// customize different properties. +func NewWithConfig(config QueueConfig) *Type { + return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) +} + +// NewNamed creates a new named queue. +// Deprecated: Use NewWithConfig instead. 
func NewNamed(name string) *Type { - rc := clock.RealClock{} + return NewWithConfig(QueueConfig{ + Name: name, + }) +} + +// newQueueWithConfig constructs a new named workqueue +// with the ability to customize different properties for testing purposes +func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { + var metricsFactory *queueMetricsFactory + if config.MetricsProvider != nil { + metricsFactory = &queueMetricsFactory{ + metricsProvider: config.MetricsProvider, + } + } else { + metricsFactory = &globalMetricsFactory + } + + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + return newQueue( - rc, - globalMetricsFactory.newQueueMetrics(name, rc), - defaultUnfinishedWorkUpdatePeriod, + config.Clock, + metricsFactory.newQueueMetrics(config.Name, config.Clock), + updatePeriod, ) } diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 91cd33f193..3e4016fb04 100644 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -16,6 +16,8 @@ limitations under the License. package workqueue +import "k8s.io/utils/clock" + // RateLimitingInterface is an interface that rate limits items being added to the queue. type RateLimitingInterface interface { DelayingInterface @@ -32,29 +34,68 @@ type RateLimitingInterface interface { NumRequeues(item interface{}) int } +// RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. + +type RateLimitingQueueConfig struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. + DelayingQueue DelayingInterface +} + // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use -// NewNamedRateLimitingQueue instead. +// NewRateLimitingQueueWithConfig instead and specify a name. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) +} + +// NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. +func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.DelayingQueue == nil { + config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + return &rateLimitingType{ - DelayingInterface: NewDelayingQueue(), + DelayingInterface: config.DelayingQueue, rateLimiter: rateLimiter, } } +// NewNamedRateLimitingQueue constructs a new named workqueue with rateLimited queuing ability. 
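The rate-limited variant follows the same pattern, so the commonly used named constructor migrates to a config struct without losing its metrics name. A sketch; the queue name is a placeholder and DefaultControllerRateLimiter is the existing helper in this package:

```go
package example // illustrative sketch, not part of this vendor drop

import (
	"k8s.io/client-go/util/workqueue"
)

func newQueue() workqueue.RateLimitingInterface {
	// Before: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "buildruns")
	return workqueue.NewRateLimitingQueueWithConfig(
		workqueue.DefaultControllerRateLimiter(),
		workqueue.RateLimitingQueueConfig{
			Name: "buildruns", // metrics are registered under this name, as before
		},
	)
}
```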
+// Deprecated: Use NewRateLimitingQueueWithConfig instead. func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewNamedDelayingQueue(name), - rateLimiter: rateLimiter, - } + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + Name: name, + }) } +// NewRateLimitingQueueWithDelayingInterface constructs a new named workqueue with rateLimited queuing ability +// with the option to inject a custom delaying queue instead of the default one. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: di, - rateLimiter: rateLimiter, - } + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + DelayingQueue: di, + }) } // rateLimitingType wraps an Interface and provides rateLimited re-enquing diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS index c59502195b..05162820a1 100644 --- a/vendor/k8s.io/code-generator/OWNERS +++ b/vendor/k8s.io/code-generator/OWNERS @@ -1,10 +1,12 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: + - deads2k - lavalamp - wojtek-t - sttts reviewers: + - deads2k - lavalamp - wojtek-t - sttts diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/args.go new file mode 100644 index 0000000000..78f364841f --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/args.go @@ -0,0 +1,81 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + "path" + + "github.com/spf13/pflag" + "k8s.io/gengo/args" + "k8s.io/gengo/types" + + codegenutil "k8s.io/code-generator/pkg/util" +) + +// CustomArgs is a wrapper for arguments to applyconfiguration-gen. +type CustomArgs struct { + // ExternalApplyConfigurations provides the locations of externally generated + // apply configuration types for types referenced by the go structs provided as input. + // Locations are provided as a comma separated list of .: + // entries. + // + // E.g. if a type references appsv1.Deployment, the location of its apply configuration should + // be provided: + // k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1 + // + // meta/v1 types (TypeMeta and ObjectMeta) are always included and do not need to be passed in. + ExternalApplyConfigurations map[types.Name]string + + OpenAPISchemaFilePath string +} + +// NewDefaults returns default arguments for the generator. +func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { + genericArgs := args.Default().WithoutDefaultFlagParsing() + customArgs := &CustomArgs{ + ExternalApplyConfigurations: map[types.Name]string{ + // Always include TypeMeta and ObjectMeta. They are sufficient for the vast majority of use cases. 
+ {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "TypeMeta"}: "k8s.io/client-go/applyconfigurations/meta/v1", + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ObjectMeta"}: "k8s.io/client-go/applyconfigurations/meta/v1", + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "OwnerReference"}: "k8s.io/client-go/applyconfigurations/meta/v1", + }, + } + genericArgs.CustomArgs = customArgs + + if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { + genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/applyconfigurations") + } + + return genericArgs, customArgs +} + +func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet, inputBase string) { + pflag.Var(NewExternalApplyConfigurationValue(&ca.ExternalApplyConfigurations, nil), "external-applyconfigurations", + "list of comma separated external apply configurations locations in .: form."+ + "For example: k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1") + pflag.StringVar(&ca.OpenAPISchemaFilePath, "openapi-schema", "", + "path to the openapi schema containing all the types that apply configurations will be generated for") +} + +// Validate checks the given arguments. +func Validate(genericArgs *args.GeneratorArgs) error { + if len(genericArgs.OutputPackagePath) == 0 { + return fmt.Errorf("output package cannot be empty") + } + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/externaltypes.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/externaltypes.go new file mode 100644 index 0000000000..0785fbea0e --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/externaltypes.go @@ -0,0 +1,122 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package args + +import ( + "bytes" + "encoding/csv" + "flag" + "fmt" + "strings" + + "k8s.io/gengo/types" +) + +type externalApplyConfigurationValue struct { + externals *map[types.Name]string +} + +func NewExternalApplyConfigurationValue(externals *map[types.Name]string, def []string) *externalApplyConfigurationValue { + val := new(externalApplyConfigurationValue) + val.externals = externals + if def != nil { + if err := val.set(def); err != nil { + panic(err) + } + } + return val +} + +var _ flag.Value = &externalApplyConfigurationValue{} + +func (s *externalApplyConfigurationValue) set(vs []string) error { + for _, input := range vs { + typ, pkg, err := parseExternalMapping(input) + if err != nil { + return err + } + if _, ok := (*s.externals)[typ]; ok { + return fmt.Errorf("duplicate type found in --external-applyconfigurations: %v", typ) + } + (*s.externals)[typ] = pkg + } + + return nil +} + +func (s *externalApplyConfigurationValue) Set(val string) error { + vs, err := readAsCSV(val) + if err != nil { + return err + } + if err := s.set(vs); err != nil { + return err + } + + return nil +} + +func (s *externalApplyConfigurationValue) Type() string { + return "string" +} + +func (s *externalApplyConfigurationValue) String() string { + var strs []string + for k, v := range *s.externals { + strs = append(strs, fmt.Sprintf("%s.%s:%s", k.Package, k.Name, v)) + } + str, _ := writeAsCSV(strs) + return "[" + str + "]" +} + +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +func writeAsCSV(vals []string) (string, error) { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(vals) + if err != nil { + return "", err + } + w.Flush() + return strings.TrimSuffix(b.String(), "\n"), nil +} + +func parseExternalMapping(mapping string) (typ types.Name, pkg string, err error) { + parts := strings.Split(mapping, ":") + if len(parts) != 2 { + return types.Name{}, "", fmt.Errorf("expected string of the form .: but got %s", mapping) + } + packageTypeStr := parts[0] + pkg = parts[1] + // need to split on the *last* dot, since k8s.io (and other valid packages) have a dot in it + lastDot := strings.LastIndex(packageTypeStr, ".") + if lastDot == -1 || lastDot == len(packageTypeStr)-1 { + return types.Name{}, "", fmt.Errorf("expected package and type of the form . but got %s", packageTypeStr) + } + structPkg := packageTypeStr[:lastDot] + structType := packageTypeStr[lastDot+1:] + + return types.Name{Package: structPkg, Name: structType}, pkg, nil +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go new file mode 100644 index 0000000000..8e02bb233b --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go @@ -0,0 +1,423 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "strings" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + "k8s.io/klog/v2" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" +) + +// applyConfigurationGenerator produces apply configurations for a given GroupVersion and type. +type applyConfigurationGenerator struct { + generator.DefaultGen + outputPackage string + localPackage types.Name + groupVersion clientgentypes.GroupVersion + applyConfig applyConfig + imports namer.ImportTracker + refGraph refGraph + openAPIType *string // if absent, extraction function cannot be generated +} + +var _ generator.Generator = &applyConfigurationGenerator{} + +func (g *applyConfigurationGenerator) Filter(_ *generator.Context, t *types.Type) bool { + return t == g.applyConfig.Type +} + +func (g *applyConfigurationGenerator) Namers(*generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.localPackage.Package, g.imports), + "singularKind": namer.NewPublicNamer(0), + } +} + +func (g *applyConfigurationGenerator) Imports(*generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +// TypeParams provides a struct that an apply configuration +// is generated for as well as the apply configuration details +// and types referenced by the struct. +type TypeParams struct { + Struct *types.Type + ApplyConfig applyConfig + Tags util.Tags + APIVersion string + ExtractInto *types.Type + ParserFunc *types.Type + OpenAPIType *string +} + +type memberParams struct { + TypeParams + Member types.Member + MemberType *types.Type + JSONTags JSONTags + ArgType *types.Type // only set for maps and slices + EmbeddedIn *memberParams // parent embedded member, if any +} + +func (g *applyConfigurationGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + klog.V(5).Infof("processing type %v", t) + typeParams := TypeParams{ + Struct: t, + ApplyConfig: g.applyConfig, + Tags: genclientTags(t), + APIVersion: g.groupVersion.ToAPIVersion(), + ExtractInto: extractInto, + ParserFunc: types.Ref(g.outputPackage+"/internal", "Parser"), + OpenAPIType: g.openAPIType, + } + + g.generateStruct(sw, typeParams) + + if typeParams.Tags.GenerateClient { + if typeParams.Tags.NonNamespaced { + sw.Do(clientgenTypeConstructorNonNamespaced, typeParams) + } else { + sw.Do(clientgenTypeConstructorNamespaced, typeParams) + } + if typeParams.OpenAPIType != nil { + g.generateClientgenExtract(sw, typeParams, !typeParams.Tags.NoStatus) + } + } else { + if hasTypeMetaField(t) { + sw.Do(constructorWithTypeMeta, typeParams) + } else { + sw.Do(constructor, typeParams) + } + } + g.generateWithFuncs(t, typeParams, sw, nil) + return sw.Error() +} + +func hasTypeMetaField(t *types.Type) bool { + for _, member := range t.Members { + if typeMeta.Name == member.Type.Name && member.Embedded { + return true + } + } + return false +} + +func blocklisted(t *types.Type, member types.Member) bool { + if objectMeta.Name == t.Name && member.Name == "ManagedFields" { + return true + } + if objectMeta.Name == t.Name && member.Name == "SelfLink" { + return true + } + // Hide any fields which are en route to deletion. 
+ if strings.HasPrefix(member.Name, "ZZZ_") { + return true + } + return false +} + +func (g *applyConfigurationGenerator) generateWithFuncs(t *types.Type, typeParams TypeParams, sw *generator.SnippetWriter, embed *memberParams) { + for _, member := range t.Members { + if blocklisted(t, member) { + continue + } + memberType := g.refGraph.applyConfigForType(member.Type) + if g.refGraph.isApplyConfig(member.Type) { + memberType = &types.Type{Kind: types.Pointer, Elem: memberType} + } + if jsonTags, ok := lookupJSONTags(member); ok { + memberParams := memberParams{ + TypeParams: typeParams, + Member: member, + MemberType: memberType, + JSONTags: jsonTags, + EmbeddedIn: embed, + } + if memberParams.Member.Embedded { + g.generateWithFuncs(member.Type, typeParams, sw, &memberParams) + if !jsonTags.inline { + // non-inlined embeds are nillable and need a "ensure exists" utility function + sw.Do(ensureEmbedExists, memberParams) + } + continue + } + + // For slices where the items are generated apply configuration types, accept varargs of + // pointers of the type as "with" function arguments so the "with" function can be used like so: + // WithFoos(Foo().WithName("x"), Foo().WithName("y")) + if t := deref(member.Type); t.Kind == types.Slice && g.refGraph.isApplyConfig(t.Elem) { + memberParams.ArgType = &types.Type{Kind: types.Pointer, Elem: memberType.Elem} + g.generateMemberWithForSlice(sw, member, memberParams) + continue + } + // Note: There are no maps where the values are generated apply configurations (because + // associative lists are used instead). So if a type like this is ever introduced, the + // default "with" function generator will produce a working (but not entirely convenient "with" function) + // that would be used like so: + // WithMap(map[string]FooApplyConfiguration{*Foo().WithName("x")}) + + switch memberParams.Member.Type.Kind { + case types.Slice: + memberParams.ArgType = memberType.Elem + g.generateMemberWithForSlice(sw, member, memberParams) + case types.Map: + g.generateMemberWithForMap(sw, memberParams) + default: + g.generateMemberWith(sw, memberParams) + } + } + } +} + +func (g *applyConfigurationGenerator) generateStruct(sw *generator.SnippetWriter, typeParams TypeParams) { + sw.Do("// $.ApplyConfig.ApplyConfiguration|public$ represents an declarative configuration of the $.ApplyConfig.Type|public$ type for use\n", typeParams) + sw.Do("// with apply.\n", typeParams) + sw.Do("type $.ApplyConfig.ApplyConfiguration|public$ struct {\n", typeParams) + for _, structMember := range typeParams.Struct.Members { + if blocklisted(typeParams.Struct, structMember) { + continue + } + if structMemberTags, ok := lookupJSONTags(structMember); ok { + if !structMemberTags.inline { + structMemberTags.omitempty = true + } + params := memberParams{ + TypeParams: typeParams, + Member: structMember, + MemberType: g.refGraph.applyConfigForType(structMember.Type), + JSONTags: structMemberTags, + } + if structMember.Embedded { + if structMemberTags.inline { + sw.Do("$.MemberType|raw$ `json:\"$.JSONTags$\"`\n", params) + } else { + sw.Do("*$.MemberType|raw$ `json:\"$.JSONTags$\"`\n", params) + } + } else if isNillable(structMember.Type) { + sw.Do("$.Member.Name$ $.MemberType|raw$ `json:\"$.JSONTags$\"`\n", params) + } else { + sw.Do("$.Member.Name$ *$.MemberType|raw$ `json:\"$.JSONTags$\"`\n", params) + } + } + } + sw.Do("}\n", typeParams) +} + +func deref(t *types.Type) *types.Type { + for t.Kind == types.Pointer { + t = t.Elem + } + return t +} + +func isNillable(t *types.Type) bool { + 
return t.Kind == types.Slice || t.Kind == types.Map +} + +func (g *applyConfigurationGenerator) generateMemberWith(sw *generator.SnippetWriter, memberParams memberParams) { + sw.Do("// With$.Member.Name$ sets the $.Member.Name$ field in the declarative configuration to the given value\n", memberParams) + sw.Do("// and returns the receiver, so that objects can be built by chaining \"With\" function invocations.\n", memberParams) + sw.Do("// If called multiple times, the $.Member.Name$ field is set to the value of the last call.\n", memberParams) + sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) With$.Member.Name$(value $.MemberType|raw$) *$.ApplyConfig.ApplyConfiguration|public$ {\n", memberParams) + g.ensureEnbedExistsIfApplicable(sw, memberParams) + if g.refGraph.isApplyConfig(memberParams.Member.Type) || isNillable(memberParams.Member.Type) { + sw.Do("b.$.Member.Name$ = value\n", memberParams) + } else { + sw.Do("b.$.Member.Name$ = &value\n", memberParams) + } + sw.Do(" return b\n", memberParams) + sw.Do("}\n", memberParams) +} + +func (g *applyConfigurationGenerator) generateMemberWithForSlice(sw *generator.SnippetWriter, member types.Member, memberParams memberParams) { + memberIsPointerToSlice := member.Type.Kind == types.Pointer + if memberIsPointerToSlice { + sw.Do(ensureNonEmbedSliceExists, memberParams) + } + + sw.Do("// With$.Member.Name$ adds the given value to the $.Member.Name$ field in the declarative configuration\n", memberParams) + sw.Do("// and returns the receiver, so that objects can be build by chaining \"With\" function invocations.\n", memberParams) + sw.Do("// If called multiple times, values provided by each call will be appended to the $.Member.Name$ field.\n", memberParams) + sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) With$.Member.Name$(values ...$.ArgType|raw$) *$.ApplyConfig.ApplyConfiguration|public$ {\n", memberParams) + g.ensureEnbedExistsIfApplicable(sw, memberParams) + + if memberIsPointerToSlice { + sw.Do("b.ensure$.MemberType.Elem|public$Exists()\n", memberParams) + } + + sw.Do(" for i := range values {\n", memberParams) + if memberParams.ArgType.Kind == types.Pointer { + sw.Do("if values[i] == nil {\n", memberParams) + sw.Do(" panic(\"nil value passed to With$.Member.Name$\")\n", memberParams) + sw.Do("}\n", memberParams) + + if memberIsPointerToSlice { + sw.Do("*b.$.Member.Name$ = append(*b.$.Member.Name$, *values[i])\n", memberParams) + } else { + sw.Do("b.$.Member.Name$ = append(b.$.Member.Name$, *values[i])\n", memberParams) + } + } else { + if memberIsPointerToSlice { + sw.Do("*b.$.Member.Name$ = append(*b.$.Member.Name$, values[i])\n", memberParams) + } else { + sw.Do("b.$.Member.Name$ = append(b.$.Member.Name$, values[i])\n", memberParams) + } + } + sw.Do(" }\n", memberParams) + sw.Do(" return b\n", memberParams) + sw.Do("}\n", memberParams) +} + +func (g *applyConfigurationGenerator) generateMemberWithForMap(sw *generator.SnippetWriter, memberParams memberParams) { + sw.Do("// With$.Member.Name$ puts the entries into the $.Member.Name$ field in the declarative configuration\n", memberParams) + sw.Do("// and returns the receiver, so that objects can be build by chaining \"With\" function invocations.\n", memberParams) + sw.Do("// If called multiple times, the entries provided by each call will be put on the $.Member.Name$ field,\n", memberParams) + sw.Do("// overwriting an existing map entries in $.Member.Name$ field with the same key.\n", memberParams) + sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) 
With$.Member.Name$(entries $.MemberType|raw$) *$.ApplyConfig.ApplyConfiguration|public$ {\n", memberParams) + g.ensureEnbedExistsIfApplicable(sw, memberParams) + sw.Do(" if b.$.Member.Name$ == nil && len(entries) > 0 {\n", memberParams) + sw.Do(" b.$.Member.Name$ = make($.MemberType|raw$, len(entries))\n", memberParams) + sw.Do(" }\n", memberParams) + sw.Do(" for k, v := range entries {\n", memberParams) + sw.Do(" b.$.Member.Name$[k] = v\n", memberParams) + sw.Do(" }\n", memberParams) + sw.Do(" return b\n", memberParams) + sw.Do("}\n", memberParams) +} + +func (g *applyConfigurationGenerator) ensureEnbedExistsIfApplicable(sw *generator.SnippetWriter, memberParams memberParams) { + // Embedded types that are not inlined must be nillable so they are not included in the apply configuration + // when all their fields are omitted. + if memberParams.EmbeddedIn != nil && !memberParams.EmbeddedIn.JSONTags.inline { + sw.Do("b.ensure$.MemberType.Elem|public$Exists()\n", memberParams.EmbeddedIn) + } +} + +var ensureEmbedExists = ` +func (b *$.ApplyConfig.ApplyConfiguration|public$) ensure$.MemberType.Elem|public$Exists() { + if b.$.MemberType.Elem|public$ == nil { + b.$.MemberType.Elem|public$ = &$.MemberType.Elem|raw${} + } +} +` + +var ensureNonEmbedSliceExists = ` +func (b *$.ApplyConfig.ApplyConfiguration|public$) ensure$.MemberType.Elem|public$Exists() { + if b.$.Member.Name$ == nil { + b.$.Member.Name$ = &[]$.MemberType.Elem|raw${} + } +} +` + +var clientgenTypeConstructorNamespaced = ` +// $.ApplyConfig.Type|public$ constructs an declarative configuration of the $.ApplyConfig.Type|public$ type for use with +// apply. +func $.ApplyConfig.Type|public$(name, namespace string) *$.ApplyConfig.ApplyConfiguration|public$ { + b := &$.ApplyConfig.ApplyConfiguration|public${} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("$.ApplyConfig.Type|singularKind$") + b.WithAPIVersion("$.APIVersion$") + return b +} +` + +var clientgenTypeConstructorNonNamespaced = ` +// $.ApplyConfig.Type|public$ constructs an declarative configuration of the $.ApplyConfig.Type|public$ type for use with +// apply. +func $.ApplyConfig.Type|public$(name string) *$.ApplyConfig.ApplyConfiguration|public$ { + b := &$.ApplyConfig.ApplyConfiguration|public${} + b.WithName(name) + b.WithKind("$.ApplyConfig.Type|singularKind$") + b.WithAPIVersion("$.APIVersion$") + return b +} +` + +var constructorWithTypeMeta = ` +// $.ApplyConfig.ApplyConfiguration|public$ constructs an declarative configuration of the $.ApplyConfig.Type|public$ type for use with +// apply. +func $.ApplyConfig.Type|public$() *$.ApplyConfig.ApplyConfiguration|public$ { + b := &$.ApplyConfig.ApplyConfiguration|public${} + b.WithKind("$.ApplyConfig.Type|singularKind$") + b.WithAPIVersion("$.APIVersion$") + return b +} +` + +var constructor = ` +// $.ApplyConfig.ApplyConfiguration|public$ constructs an declarative configuration of the $.ApplyConfig.Type|public$ type for use with +// apply. +func $.ApplyConfig.Type|public$() *$.ApplyConfig.ApplyConfiguration|public$ { + return &$.ApplyConfig.ApplyConfiguration|public${} +} +` + +func (g *applyConfigurationGenerator) generateClientgenExtract(sw *generator.SnippetWriter, typeParams TypeParams, includeStatus bool) { + sw.Do(` +// Extract$.ApplyConfig.Type|public$ extracts the applied configuration owned by fieldManager from +// $.Struct|private$. 
If no managedFields are found in $.Struct|private$ for fieldManager, a +// $.ApplyConfig.ApplyConfiguration|public$ is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// $.Struct|private$ must be a unmodified $.Struct|public$ API object that was retrieved from the Kubernetes API. +// Extract$.ApplyConfig.Type|public$ provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func Extract$.ApplyConfig.Type|public$($.Struct|private$ *$.Struct|raw$, fieldManager string) (*$.ApplyConfig.ApplyConfiguration|public$, error) { + return extract$.ApplyConfig.Type|public$($.Struct|private$, fieldManager, "") +}`, typeParams) + if includeStatus { + sw.Do(` +// Extract$.ApplyConfig.Type|public$Status is the same as Extract$.ApplyConfig.Type|public$ except +// that it extracts the status subresource applied configuration. +// Experimental! +func Extract$.ApplyConfig.Type|public$Status($.Struct|private$ *$.Struct|raw$, fieldManager string) (*$.ApplyConfig.ApplyConfiguration|public$, error) { + return extract$.ApplyConfig.Type|public$($.Struct|private$, fieldManager, "status") +} +`, typeParams) + } + sw.Do(` +func extract$.ApplyConfig.Type|public$($.Struct|private$ *$.Struct|raw$, fieldManager string, subresource string) (*$.ApplyConfig.ApplyConfiguration|public$, error) { + b := &$.ApplyConfig.ApplyConfiguration|public${} + err := $.ExtractInto|raw$($.Struct|private$, $.ParserFunc|raw$().Type("$.OpenAPIType$"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName($.Struct|private$.Name) +`, typeParams) + if !typeParams.Tags.NonNamespaced { + sw.Do("b.WithNamespace($.Struct|private$.Namespace)\n", typeParams) + } + sw.Do(` + b.WithKind("$.ApplyConfig.Type|singularKind$") + b.WithAPIVersion("$.APIVersion$") + return b, nil +} +`, typeParams) +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go new file mode 100644 index 0000000000..2871b9d7f5 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + + "gopkg.in/yaml.v2" + + "k8s.io/kube-openapi/pkg/schemaconv" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// utilGenerator generates the ForKind() utility function. 
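These templates are what produce the constructors, With functions, and Extract functions shipped under k8s.io/client-go/applyconfigurations. A hedged sketch of the extract/modify/apply round trip they enable; the clientset, namespace, object name, and field manager are placeholders:

```go
package example // illustrative sketch, not part of this vendor drop

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	"k8s.io/client-go/kubernetes"
)

func scaleWithApply(ctx context.Context, client kubernetes.Interface) error {
	deployments := client.AppsV1().Deployments("default") // placeholder namespace

	current, err := deployments.Get(ctx, "web", metav1.GetOptions{}) // placeholder name
	if err != nil {
		return err
	}

	// Extract only the fields previously owned by this field manager.
	ac, err := appsv1ac.ExtractDeployment(current, "example-manager")
	if err != nil {
		return err
	}
	ac.WithSpec(appsv1ac.DeploymentSpec().WithReplicas(3))

	_, err = deployments.Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
```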
+type internalGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + typeModels *typeModels + filtered bool +} + +var _ generator.Generator = &internalGenerator{} + +func (g *internalGenerator) Filter(*generator.Context, *types.Type) bool { + // generate file exactly once + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *internalGenerator) Namers(*generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "singularKind": namer.NewPublicNamer(0), + } +} + +func (g *internalGenerator) Imports(*generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +func (g *internalGenerator) GenerateType(c *generator.Context, _ *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + schema, err := schemaconv.ToSchema(g.typeModels.models) + if err != nil { + return err + } + schemaYAML, err := yaml.Marshal(schema) + if err != nil { + return err + } + sw.Do(schemaBlock, map[string]interface{}{ + "schemaYAML": string(schemaYAML), + "smdParser": smdParser, + "smdNewParser": smdNewParser, + "yamlObject": yamlObject, + "yamlUnmarshal": yamlUnmarshal, + }) + + return sw.Error() +} + +var schemaBlock = ` +func Parser() *{{.smdParser|raw}} { + parserOnce.Do(func() { + var err error + parser, err = {{.smdNewParser|raw}}(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *{{.smdParser|raw}} +var schemaYAML = {{.yamlObject|raw}}(` + "`{{.schemaYAML}}`" + `) +` diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/jsontagutil.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/jsontagutil.go new file mode 100644 index 0000000000..2a643290bb --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/jsontagutil.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "reflect" + "strings" + + "k8s.io/gengo/types" +) + +// TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236 +// but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go + +// JSONTags represents a go json field tag. 
+type JSONTags struct { + name string + omit bool + inline bool + omitempty bool +} + +func (t JSONTags) String() string { + var tag string + if !t.inline { + tag += t.name + } + if t.omitempty { + tag += ",omitempty" + } + if t.inline { + tag += ",inline" + } + return tag +} + +func lookupJSONTags(m types.Member) (JSONTags, bool) { + tag := reflect.StructTag(m.Tags).Get("json") + if tag == "" || tag == "-" { + return JSONTags{}, false + } + name, opts := parseTag(tag) + if name == "" { + name = m.Name + } + return JSONTags{ + name: name, + omit: false, + inline: opts.Contains("inline"), + omitempty: opts.Contains("omitempty"), + }, true +} + +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, "" +} + +// Contains reports whether a comma-separated listAlias of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/openapi.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/openapi.go new file mode 100644 index 0000000000..940926f345 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/openapi.go @@ -0,0 +1,198 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "encoding/json" + "fmt" + "os" + "strings" + + openapiv2 "github.com/google/gnostic/openapiv2" + "k8s.io/gengo/types" + utilproto "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +type typeModels struct { + models utilproto.Models + gvkToOpenAPIType map[gvk]string +} + +type gvk struct { + group, version, kind string +} + +func newTypeModels(openAPISchemaFilePath string, pkgTypes map[string]*types.Package) (*typeModels, error) { + if len(openAPISchemaFilePath) == 0 { + return emptyModels, nil // No Extract() functions will be generated. + } + + rawOpenAPISchema, err := os.ReadFile(openAPISchemaFilePath) + if err != nil { + return nil, fmt.Errorf("failed to read openapi-schema file: %w", err) + } + + // Read in the provided openAPI schema. + openAPISchema := &spec.Swagger{} + err = json.Unmarshal(rawOpenAPISchema, openAPISchema) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal typeModels JSON: %w", err) + } + + // Build a mapping from openAPI type name to GVK. + // Find the root types needed by by client-go for apply. 
+ gvkToOpenAPIType := map[gvk]string{} + rootDefs := map[string]spec.Schema{} + for _, p := range pkgTypes { + gv := groupVersion(p) + for _, t := range p.Types { + tags := genclientTags(t) + hasApply := tags.HasVerb("apply") || tags.HasVerb("applyStatus") + if tags.GenerateClient && hasApply { + openAPIType := friendlyName(typeName(t)) + gvk := gvk{ + group: gv.Group.String(), + version: gv.Version.String(), + kind: t.Name.Name, + } + rootDefs[openAPIType] = openAPISchema.Definitions[openAPIType] + gvkToOpenAPIType[gvk] = openAPIType + } + } + } + + // Trim the schema down to just the types needed by client-go for apply. + requiredDefs := make(map[string]spec.Schema) + for name, def := range rootDefs { + requiredDefs[name] = def + findReferenced(&def, openAPISchema.Definitions, requiredDefs) + } + openAPISchema.Definitions = requiredDefs + + // Convert the openAPI schema to the models format and validate it. + models, err := toValidatedModels(openAPISchema) + if err != nil { + return nil, err + } + return &typeModels{models: models, gvkToOpenAPIType: gvkToOpenAPIType}, nil +} + +var emptyModels = &typeModels{ + models: &utilproto.Definitions{}, + gvkToOpenAPIType: map[gvk]string{}, +} + +func toValidatedModels(openAPISchema *spec.Swagger) (utilproto.Models, error) { + // openapi_v2.ParseDocument only accepts a []byte of the JSON or YAML file to be parsed. + // so we do an inefficient marshal back to json and then read it back in as yaml + // but get the benefit of running the models through utilproto.NewOpenAPIData to + // validate all the references between types + rawMinimalOpenAPISchema, err := json.Marshal(openAPISchema) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal openAPI as JSON: %w", err) + } + + document, err := openapiv2.ParseDocument(rawMinimalOpenAPISchema) + if err != nil { + return nil, fmt.Errorf("failed to parse OpenAPI document for file: %w", err) + } + // Construct the models and validate all references are valid. + models, err := utilproto.NewOpenAPIData(document) + if err != nil { + return nil, fmt.Errorf("failed to create OpenAPI models for file: %w", err) + } + return models, nil +} + +// findReferenced recursively finds all schemas referenced from the given def. +// toValidatedModels makes sure no references get missed. 
+func findReferenced(def *spec.Schema, allSchemas, referencedOut map[string]spec.Schema) { + // follow $ref, if any + refPtr := def.Ref.GetPointer() + if refPtr != nil && !refPtr.IsEmpty() { + name := refPtr.String() + if !strings.HasPrefix(name, "/definitions/") { + return + } + name = strings.TrimPrefix(name, "/definitions/") + schema, ok := allSchemas[name] + if !ok { + panic(fmt.Sprintf("allSchemas schema is missing referenced type: %s", name)) + } + if _, ok := referencedOut[name]; !ok { + referencedOut[name] = schema + findReferenced(&schema, allSchemas, referencedOut) + } + } + + // follow any nested schemas + if def.Items != nil { + if def.Items.Schema != nil { + findReferenced(def.Items.Schema, allSchemas, referencedOut) + } + for _, item := range def.Items.Schemas { + findReferenced(&item, allSchemas, referencedOut) + } + } + if def.AllOf != nil { + for _, s := range def.AllOf { + findReferenced(&s, allSchemas, referencedOut) + } + } + if def.AnyOf != nil { + for _, s := range def.AnyOf { + findReferenced(&s, allSchemas, referencedOut) + } + } + if def.OneOf != nil { + for _, s := range def.OneOf { + findReferenced(&s, allSchemas, referencedOut) + } + } + if def.Not != nil { + findReferenced(def.Not, allSchemas, referencedOut) + } + if def.Properties != nil { + for _, prop := range def.Properties { + findReferenced(&prop, allSchemas, referencedOut) + } + } + if def.AdditionalProperties != nil && def.AdditionalProperties.Schema != nil { + findReferenced(def.AdditionalProperties.Schema, allSchemas, referencedOut) + } + if def.PatternProperties != nil { + for _, s := range def.PatternProperties { + findReferenced(&s, allSchemas, referencedOut) + } + } + if def.Dependencies != nil { + for _, d := range def.Dependencies { + if d.Schema != nil { + findReferenced(d.Schema, allSchemas, referencedOut) + } + } + } + if def.AdditionalItems != nil && def.AdditionalItems.Schema != nil { + findReferenced(def.AdditionalItems.Schema, allSchemas, referencedOut) + } + if def.Definitions != nil { + for _, s := range def.Definitions { + findReferenced(&s, allSchemas, referencedOut) + } + } +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/packages.go new file mode 100644 index 0000000000..bfeffda593 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/packages.go @@ -0,0 +1,297 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "path" + "path/filepath" + "sort" + "strings" + + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" + "k8s.io/klog/v2" + + applygenargs "k8s.io/code-generator/cmd/applyconfiguration-gen/args" + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" +) + +const ( + // ApplyConfigurationTypeSuffix is the suffix of generated apply configuration types. 
+ ApplyConfigurationTypeSuffix = "ApplyConfiguration" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": namer.NewPublicNamer(0), + "private": namer.NewPrivateNamer(0), + "raw": namer.NewRawNamer("", nil), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// Packages makes the client package definition. +func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { + boilerplate, err := arguments.LoadGoBoilerplate() + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + pkgTypes := packageTypesForInputDirs(context, arguments.InputDirs, arguments.OutputPackagePath) + customArgs := arguments.CustomArgs.(*applygenargs.CustomArgs) + initialTypes := customArgs.ExternalApplyConfigurations + refs := refGraphForReachableTypes(context.Universe, pkgTypes, initialTypes) + typeModels, err := newTypeModels(customArgs.OpenAPISchemaFilePath, pkgTypes) + if err != nil { + klog.Fatalf("Failed build type models from typeModels %s: %v", customArgs.OpenAPISchemaFilePath, err) + } + + groupVersions := make(map[string]clientgentypes.GroupVersions) + groupGoNames := make(map[string]string) + applyConfigsForGroupVersion := make(map[clientgentypes.GroupVersion][]applyConfig) + + var packageList generator.Packages + for pkg, p := range pkgTypes { + gv := groupVersion(p) + + pkgType := types.Name{Name: gv.Group.PackageName(), Package: pkg} + + var toGenerate []applyConfig + for _, t := range p.Types { + // If we don't have an ObjectMeta field, we lack the information required to make the Apply or ApplyStatus call + // to the kube-apiserver, so we don't need to generate the type at all + clientTags := genclientTags(t) + if clientTags.GenerateClient && !hasObjectMetaField(t) { + klog.V(5).Infof("skipping type %v because does not have ObjectMeta", t) + continue + } + if typePkg, ok := refs[t.Name]; ok { + toGenerate = append(toGenerate, applyConfig{ + Type: t, + ApplyConfiguration: types.Ref(typePkg, t.Name.Name+ApplyConfigurationTypeSuffix), + }) + } + } + if len(toGenerate) == 0 { + continue // Don't generate empty packages + } + sort.Sort(applyConfigSort(toGenerate)) + + // generate the apply configurations + packageList = append(packageList, generatorForApplyConfigurationsPackage(arguments.OutputPackagePath, boilerplate, pkgType, gv, toGenerate, refs, typeModels)) + + // group all the generated apply configurations by gv so ForKind() can be generated + groupPackageName := gv.Group.NonEmpty() + groupVersionsEntry, ok := groupVersions[groupPackageName] + if !ok { + groupVersionsEntry = clientgentypes.GroupVersions{ + PackageName: groupPackageName, + Group: gv.Group, + } + } + groupVersionsEntry.Versions = append(groupVersionsEntry.Versions, clientgentypes.PackageVersion{ + Version: gv.Version, + Package: path.Clean(p.Path), + }) + + groupGoNames[groupPackageName] = goName(gv, p) + applyConfigsForGroupVersion[gv] = toGenerate + groupVersions[groupPackageName] = groupVersionsEntry + } + + // generate ForKind() utility function + packageList = append(packageList, generatorForUtils(arguments.OutputPackagePath, boilerplate, groupVersions, applyConfigsForGroupVersion, groupGoNames)) + // generate internal embedded schema, required for generated Extract functions + packageList = append(packageList, 
generatorForInternal(filepath.Join(arguments.OutputPackagePath, "internal"), boilerplate, typeModels)) + + return packageList +} + +func friendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} + +func typeName(t *types.Type) string { + typePackage := t.Name.Package + if strings.Contains(typePackage, "/vendor/") { + typePackage = typePackage[strings.Index(typePackage, "/vendor/")+len("/vendor/"):] + } + return fmt.Sprintf("%s.%s", typePackage, t.Name.Name) +} + +func generatorForApplyConfigurationsPackage(outputPackagePath string, boilerplate []byte, packageName types.Name, gv clientgentypes.GroupVersion, typesToGenerate []applyConfig, refs refGraph, models *typeModels) *generator.DefaultPackage { + return &generator.DefaultPackage{ + PackageName: gv.Version.PackageName(), + PackagePath: packageName.Package, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + for _, toGenerate := range typesToGenerate { + var openAPIType *string + gvk := gvk{ + group: gv.Group.String(), + version: gv.Version.String(), + kind: toGenerate.Type.Name.Name, + } + if v, ok := models.gvkToOpenAPIType[gvk]; ok { + openAPIType = &v + } + + generators = append(generators, &applyConfigurationGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: strings.ToLower(toGenerate.Type.Name.Name), + }, + outputPackage: outputPackagePath, + localPackage: packageName, + groupVersion: gv, + applyConfig: toGenerate, + imports: generator.NewImportTracker(), + refGraph: refs, + openAPIType: openAPIType, + }) + } + return generators + }, + } +} + +func generatorForUtils(outPackagePath string, boilerplate []byte, groupVersions map[string]clientgentypes.GroupVersions, applyConfigsForGroupVersion map[clientgentypes.GroupVersion][]applyConfig, groupGoNames map[string]string) *generator.DefaultPackage { + return &generator.DefaultPackage{ + PackageName: filepath.Base(outPackagePath), + PackagePath: outPackagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &utilGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "utils", + }, + outputPackage: outPackagePath, + imports: generator.NewImportTracker(), + groupVersions: groupVersions, + typesForGroupVersion: applyConfigsForGroupVersion, + groupGoNames: groupGoNames, + }) + return generators + }, + } +} + +func generatorForInternal(outPackagePath string, boilerplate []byte, models *typeModels) *generator.DefaultPackage { + return &generator.DefaultPackage{ + PackageName: filepath.Base(outPackagePath), + PackagePath: outPackagePath, + HeaderText: boilerplate, + GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &internalGenerator{ + DefaultGen: generator.DefaultGen{ + OptionalName: "internal", + }, + outputPackage: outPackagePath, + imports: generator.NewImportTracker(), + typeModels: models, + }) + return generators + }, + } +} + +func goName(gv clientgentypes.GroupVersion, p *types.Package) string { + goName := namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) + if override := 
types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { + goName = namer.IC(override[0]) + } + return goName +} + +func packageTypesForInputDirs(context *generator.Context, inputDirs []string, outputPath string) map[string]*types.Package { + pkgTypes := map[string]*types.Package{} + for _, inputDir := range inputDirs { + p := context.Universe.Package(inputDir) + internal := isInternalPackage(p) + if internal { + klog.Warningf("Skipping internal package: %s", p.Path) + continue + } + // This is how the client generator finds the package we are creating. It uses the API package name, not the group name. + // This matches the approach of the client-gen, so the two generator can work together. + // For example, if openshift/api/cloudnetwork/v1 contains an apigroup cloud.network.openshift.io, the client-gen + // builds a package called cloudnetwork/v1 to contain it. This change makes the applyconfiguration-gen use the same. + _, gvPackageString := util.ParsePathGroupVersion(p.Path) + pkg := filepath.Join(outputPath, strings.ToLower(gvPackageString)) + pkgTypes[pkg] = p + } + return pkgTypes +} + +func groupVersion(p *types.Package) (gv clientgentypes.GroupVersion) { + parts := strings.Split(p.Path, "/") + gv.Group = clientgentypes.Group(parts[len(parts)-2]) + gv.Version = clientgentypes.Version(parts[len(parts)-1]) + + // If there's a comment of the form "// +groupName=somegroup" or + // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the + // group when generating. + if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + gv.Group = clientgentypes.Group(override[0]) + } + return gv +} + +// isInternalPackage returns true if the package is an internal package +func isInternalPackage(p *types.Package) bool { + for _, t := range p.Types { + for _, member := range t.Members { + if member.Name == "ObjectMeta" { + return isInternal(member) + } + } + } + return false +} + +// isInternal returns true if the tags for a member do not contain a json tag +func isInternal(m types.Member) bool { + _, ok := lookupJSONTags(m) + return !ok +} + +func hasObjectMetaField(t *types.Type) bool { + for _, member := range t.Members { + if objectMeta.Name == member.Type.Name && member.Embedded { + return true + } + } + return false +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/refgraph.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/refgraph.go new file mode 100644 index 0000000000..d1f9511279 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/refgraph.go @@ -0,0 +1,179 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generators + +import ( + "k8s.io/gengo/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// refGraph maps existing types to the package the corresponding applyConfig types will be generated in +// so that references between apply configurations can be correctly generated. +type refGraph map[types.Name]string + +// refGraphForReachableTypes returns a refGraph that contains all reachable types from +// the root clientgen types of the provided packages. +func refGraphForReachableTypes(universe types.Universe, pkgTypes map[string]*types.Package, initialTypes map[types.Name]string) refGraph { + var refs refGraph = initialTypes + + // Include only types that are reachable from the root clientgen types. + // We don't want to generate apply configurations for types that are not reachable from a root + // clientgen type. + reachableTypes := map[types.Name]*types.Type{} + for _, p := range pkgTypes { + for _, t := range p.Types { + tags := genclientTags(t) + hasApply := tags.HasVerb("apply") || tags.HasVerb("applyStatus") + if tags.GenerateClient && hasApply { + findReachableTypes(t, reachableTypes) + } + // If any apply extensions have custom inputs, add them. + for _, extension := range tags.Extensions { + if extension.HasVerb("apply") { + if len(extension.InputTypeOverride) > 0 { + inputType := *t + if name, pkg := extension.Input(); len(pkg) > 0 { + inputType = *(universe.Type(types.Name{Package: pkg, Name: name})) + } else { + inputType.Name.Name = extension.InputTypeOverride + } + findReachableTypes(&inputType, reachableTypes) + } + } + } + } + } + for pkg, p := range pkgTypes { + for _, t := range p.Types { + if _, ok := reachableTypes[t.Name]; !ok { + continue + } + if requiresApplyConfiguration(t) { + refs[t.Name] = pkg + } + } + } + + return refs +} + +// applyConfigForType find the type used in the generate apply configurations for a field. +// This may either be an existing type or one of the other generated applyConfig types. +func (t refGraph) applyConfigForType(field *types.Type) *types.Type { + switch field.Kind { + case types.Struct: + if pkg, ok := t[field.Name]; ok { // TODO(jpbetz): Refs to types defined in a separate system (e.g. TypeMeta if generating a 3rd party controller) end up referencing the go struct, not the apply configuration type + return types.Ref(pkg, field.Name.Name+ApplyConfigurationTypeSuffix) + } + return field + case types.Map: + if _, ok := t[field.Elem.Name]; ok { + return &types.Type{ + Kind: types.Map, + Elem: t.applyConfigForType(field.Elem), + Key: t.applyConfigForType(field.Key), + } + } + return field + case types.Slice: + if _, ok := t[field.Elem.Name]; ok { + return &types.Type{ + Kind: types.Slice, + Elem: t.applyConfigForType(field.Elem), + } + } + return field + case types.Pointer: + return t.applyConfigForType(field.Elem) + default: + return field + } +} + +func (t refGraph) isApplyConfig(field *types.Type) bool { + switch field.Kind { + case types.Struct: + _, ok := t[field.Name] + return ok + case types.Pointer: + return t.isApplyConfig(field.Elem) + } + return false +} + +// genclientTags returns the genclient Tags for the given type. +func genclientTags(t *types.Type) util.Tags { + return util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) +} + +// findReachableTypes finds all types transitively reachable from a given root type, including +// the root type itself. 
+func findReachableTypes(t *types.Type, referencedTypes map[types.Name]*types.Type) { + if _, ok := referencedTypes[t.Name]; ok { + return + } + referencedTypes[t.Name] = t + + if t.Elem != nil { + findReachableTypes(t.Elem, referencedTypes) + } + if t.Underlying != nil { + findReachableTypes(t.Underlying, referencedTypes) + } + if t.Key != nil { + findReachableTypes(t.Key, referencedTypes) + } + for _, m := range t.Members { + findReachableTypes(m.Type, referencedTypes) + } +} + +// excludeTypes contains well known types that we do not generate apply configurations for. +// Hard coding because we only have two, very specific types that serve a special purpose +// in the type system here. +var excludeTypes = map[types.Name]struct{}{ + rawExtension.Name: {}, + unknown.Name: {}, + // DO NOT ADD TO THIS LIST. If we need to exclude other types, we should consider allowing the + // go type declarations to be annotated as excluded from this generator. +} + +// requiresApplyConfiguration returns true if a type applyConfig should be generated for the given type. +// types applyConfig are only generated for struct types that contain fields with json tags. +func requiresApplyConfiguration(t *types.Type) bool { + for t.Kind == types.Alias { + t = t.Underlying + } + if t.Kind != types.Struct { + return false + } + if _, ok := excludeTypes[t.Name]; ok { + return false + } + var hasJSONTaggedMembers bool + for _, member := range t.Members { + if _, ok := lookupJSONTags(member); ok { + hasJSONTaggedMembers = true + } + } + if !hasJSONTaggedMembers { + return false + } + + return true +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go new file mode 100644 index 0000000000..66578ae048 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generators + +import "k8s.io/gengo/types" + +var ( + applyConfiguration = types.Ref("k8s.io/apimachinery/pkg/runtime", "ApplyConfiguration") + groupVersionKind = types.Ref("k8s.io/apimachinery/pkg/runtime/schema", "GroupVersionKind") + typeMeta = types.Ref("k8s.io/apimachinery/pkg/apis/meta/v1", "TypeMeta") + objectMeta = types.Ref("k8s.io/apimachinery/pkg/apis/meta/v1", "ObjectMeta") + rawExtension = types.Ref("k8s.io/apimachinery/pkg/runtime", "RawExtension") + unknown = types.Ref("k8s.io/apimachinery/pkg/runtime", "Unknown") + extractInto = types.Ref("k8s.io/apimachinery/pkg/util/managedfields", "ExtractInto") + smdNewParser = types.Ref("sigs.k8s.io/structured-merge-diff/v4/typed", "NewParser") + smdParser = types.Ref("sigs.k8s.io/structured-merge-diff/v4/typed", "Parser") + yamlObject = types.Ref("sigs.k8s.io/structured-merge-diff/v4/typed", "YAMLObject") + yamlUnmarshal = types.Ref("gopkg.in/yaml.v2", "Unmarshal") +) diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/util.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/util.go new file mode 100644 index 0000000000..258293afea --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/util.go @@ -0,0 +1,163 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "sort" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// utilGenerator generates the ForKind() utility function. 
+type utilGenerator struct { + generator.DefaultGen + outputPackage string + imports namer.ImportTracker + groupVersions map[string]clientgentypes.GroupVersions + groupGoNames map[string]string + typesForGroupVersion map[clientgentypes.GroupVersion][]applyConfig + filtered bool +} + +var _ generator.Generator = &utilGenerator{} + +func (g *utilGenerator) Filter(*generator.Context, *types.Type) bool { + // generate file exactly once + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *utilGenerator) Namers(*generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "singularKind": namer.NewPublicNamer(0), + } +} + +func (g *utilGenerator) Imports(*generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +type group struct { + GroupGoName string + Name string + Versions []*version +} + +type groupSort []group + +func (g groupSort) Len() int { return len(g) } +func (g groupSort) Less(i, j int) bool { + return strings.ToLower(g[i].Name) < strings.ToLower(g[j].Name) +} +func (g groupSort) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +type version struct { + Name string + GoName string + Resources []applyConfig +} + +type versionSort []*version + +func (v versionSort) Len() int { return len(v) } +func (v versionSort) Less(i, j int) bool { + return strings.ToLower(v[i].Name) < strings.ToLower(v[j].Name) +} +func (v versionSort) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +type applyConfig struct { + Type *types.Type + ApplyConfiguration *types.Type +} + +type applyConfigSort []applyConfig + +func (v applyConfigSort) Len() int { return len(v) } +func (v applyConfigSort) Less(i, j int) bool { + return strings.ToLower(v[i].Type.Name.Name) < strings.ToLower(v[j].Type.Name.Name) +} +func (v applyConfigSort) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +func (g *utilGenerator) GenerateType(c *generator.Context, _ *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + var groups []group + schemeGVs := make(map[*version]*types.Type) + + for groupPackageName, groupVersions := range g.groupVersions { + group := group{ + GroupGoName: g.groupGoNames[groupPackageName], + Name: groupVersions.Group.NonEmpty(), + Versions: []*version{}, + } + for _, v := range groupVersions.Versions { + gv := clientgentypes.GroupVersion{Group: groupVersions.Group, Version: v.Version} + version := &version{ + Name: v.Version.NonEmpty(), + GoName: namer.IC(v.Version.NonEmpty()), + Resources: g.typesForGroupVersion[gv], + } + schemeGVs[version] = c.Universe.Variable(types.Name{ + Package: g.typesForGroupVersion[gv][0].Type.Name.Package, + Name: "SchemeGroupVersion", + }) + group.Versions = append(group.Versions, version) + } + sort.Sort(versionSort(group.Versions)) + groups = append(groups, group) + } + sort.Sort(groupSort(groups)) + + m := map[string]interface{}{ + "groups": groups, + "schemeGVs": schemeGVs, + "schemaGroupVersionKind": groupVersionKind, + "applyConfiguration": applyConfiguration, + } + sw.Do(forKindFunc, m) + + return sw.Error() +} + +var forKindFunc = ` +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given GroupVersionKind. 
+func ForKind(kind {{.schemaGroupVersionKind|raw}}) interface{} { + switch kind { + {{range $group := .groups -}}{{$GroupGoName := .GroupGoName -}} + {{range $version := .Versions -}} + // Group={{$group.Name}}, Version={{.Name}} + {{range .Resources -}} + case {{index $.schemeGVs $version|raw}}.WithKind("{{.Type|singularKind}}"): + return &{{.ApplyConfiguration|raw}}{} + {{end}} + {{end}} + {{end -}} + } + return nil +} +` diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/main.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/main.go new file mode 100644 index 0000000000..b4e264042a --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/main.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// typebuilder-gen is a tool for auto-generating apply builder functions. +package main + +import ( + "flag" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" + + generatorargs "k8s.io/code-generator/cmd/applyconfiguration-gen/args" + "k8s.io/code-generator/cmd/applyconfiguration-gen/generators" +) + +func main() { + klog.InitFlags(nil) + genericArgs, customArgs := generatorargs.NewDefaults() + genericArgs.AddFlags(pflag.CommandLine) + customArgs.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of applyconfiguration-gen + if err := flag.Set("logtostderr", "true"); err != nil { + klog.Fatalf("Error: %v", err) + } + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := generatorargs.Validate(genericArgs); err != nil { + klog.Fatalf("Error: %v", err) + } + + // Run it. + if err := genericArgs.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + generators.Packages, + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index 6739a3fa46..ef4466d800 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -131,12 +131,10 @@ func DefaultNameSystem() string { func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, apiPath string, srcTreePath string, inputPackage string, applyBuilderPackage string, boilerplate []byte) generator.Package { groupVersionClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) return &generator.DefaultPackage{ - PackageName: strings.ToLower(gv.Version.NonEmpty()), - PackagePath: groupVersionClientPackage, - HeaderText: boilerplate, - PackageDocumentation: []byte( - `// This package has the automatically generated typed clients. 
-`), + PackageName: strings.ToLower(gv.Version.NonEmpty()), + PackagePath: groupVersionClientPackage, + HeaderText: boilerplate, + PackageDocumentation: []byte("// This package has the automatically generated typed clients.\n"), // GeneratorFunc returns a list of generators. Each generator makes a // single file. GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { @@ -200,16 +198,10 @@ func packageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage PackageName: customArgs.ClientsetName, PackagePath: clientsetPackage, HeaderText: boilerplate, - PackageDocumentation: []byte( - `// This package has the automatically generated clientset. -`), // GeneratorFunc returns a list of generators. Each generator generates a // single file. GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { generators = []generator.Generator{ - // Always generate a "doc.go" file. - generator.DefaultGen{OptionalName: "doc"}, - &genClientset{ DefaultGen: generator.DefaultGen{ OptionalName: "clientset", @@ -242,12 +234,10 @@ NextGroup: } return &generator.DefaultPackage{ - PackageName: "scheme", - PackagePath: schemePackage, - HeaderText: boilerplate, - PackageDocumentation: []byte( - `// This package contains the scheme of the automatically generated clientset. -`), + PackageName: "scheme", + PackagePath: schemePackage, + HeaderText: boilerplate, + PackageDocumentation: []byte("// This package contains the scheme of the automatically generated clientset.\n"), // GeneratorFunc returns a list of generators. Each generator generates a // single file. GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index ec40fb4ed7..dce920ad19 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -27,7 +27,6 @@ import ( "k8s.io/gengo/types" "k8s.io/code-generator/cmd/client-gen/generators/util" - "k8s.io/code-generator/cmd/client-gen/path" ) // genFakeForType produces a file for each top-level type. @@ -93,51 +92,33 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. if err != nil { return err } - canonicalGroup := g.group - if canonicalGroup == "core" { - canonicalGroup = "" - } - - groupName := g.group - if g.group == "core" { - groupName = "" - } - - // allow user to define a group name that's different from the one parsed from the directory. 
- p := c.Universe.Package(path.Vendorless(g.inputPackage)) - if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { - groupName = override[0] - } const pkgClientGoTesting = "k8s.io/client-go/testing" m := map[string]interface{}{ - "type": t, - "inputType": t, - "resultType": t, - "subresourcePath": "", - "package": pkg, - "Package": namer.IC(pkg), - "namespaced": !tags.NonNamespaced, - "Group": namer.IC(g.group), - "GroupGoName": g.groupGoName, - "Version": namer.IC(g.version), - "group": canonicalGroup, - "groupName": groupName, - "version": g.version, - "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), - "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), - "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), - "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), - "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), - "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), - "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), - "Everything": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Everything"}), - "GroupVersionResource": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionResource"}), - "GroupVersionKind": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionKind"}), - "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), - "ApplyPatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "ApplyPatchType"}), - "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), - "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), + "type": t, + "inputType": t, + "resultType": t, + "subresourcePath": "", + "package": pkg, + "Package": namer.IC(pkg), + "namespaced": !tags.NonNamespaced, + "Group": namer.IC(g.group), + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "version": g.version, + "SchemeGroupVersion": c.Universe.Type(types.Name{Package: t.Name.Package, Name: "SchemeGroupVersion"}), + "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), + "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), + "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), + "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), + "Everything": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Everything"}), + "PatchType": 
c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), + "ApplyPatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "ApplyPatchType"}), + "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), + "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), "NewRootListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootListAction"}), "NewListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewListAction"}), @@ -340,11 +321,11 @@ type Fake$.type|publicPlural$ struct { ` var resource = ` -var $.type|allLowercasePlural$Resource = $.GroupVersionResource|raw${Group: "$.groupName$", Version: "$.version$", Resource: "$.type|resource$"} +var $.type|allLowercasePlural$Resource = $.SchemeGroupVersion|raw$.WithResource("$.type|resource$") ` var kind = ` -var $.type|allLowercasePlural$Kind = $.GroupVersionKind|raw${Group: "$.groupName$", Version: "$.version$", Kind: "$.type|singularKind$"} +var $.type|allLowercasePlural$Kind = $.SchemeGroupVersion|raw$.WithKind("$.type|singularKind$") ` var listTemplate = ` diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go index c472b15dad..ff267e2610 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go @@ -50,6 +50,7 @@ type Generator struct { KeepGogoproto bool SkipGeneratedRewrite bool DropEmbeddedFields string + TrimPathPrefix string } func New() *Generator { @@ -95,6 +96,7 @@ func (g *Generator) BindFlags(flag *flag.FlagSet) { flag.BoolVar(&g.KeepGogoproto, "keep-gogoproto", g.KeepGogoproto, "If true, the generated IDL will contain gogoprotobuf extensions which are normally removed") flag.BoolVar(&g.SkipGeneratedRewrite, "skip-generated-rewrite", g.SkipGeneratedRewrite, "If true, skip fixing up the generated.pb.go file (debugging only).") flag.StringVar(&g.DropEmbeddedFields, "drop-embedded-fields", g.DropEmbeddedFields, "Comma-delimited list of embedded Go types to omit from generated protobufs") + flag.StringVar(&g.TrimPathPrefix, "trim-path-prefix", g.TrimPathPrefix, "If set, trim the specified prefix from --output-package when generating files.") } func Run(g *Generator) { @@ -200,6 +202,7 @@ func Run(g *Generator) { c.Verify = g.Common.VerifyOnly c.FileTypes["protoidl"] = NewProtoFile() + c.TrimPathPrefix = g.TrimPathPrefix // order package by imports, importees first deps := deps(c, protobufNames.packages) @@ -270,14 +273,28 @@ func Run(g *Generator) { outputPath = filepath.Join(g.VendorOutputBase, p.OutputPath()) } + // When working outside of GOPATH, we typically won't want to generate the + // full path for a package. For example, if our current project's root/base + // package is github.com/foo/bar, outDir=., p.Path()=github.com/foo/bar/generated, + // then we really want to be writing files to ./generated, not ./github.com/foo/bar/generated. + // The following will trim a path prefix (github.com/foo/bar) from p.Path() to arrive at + // a relative path that works with projects not in GOPATH. 
+ if g.TrimPathPrefix != "" { + separator := string(filepath.Separator) + if !strings.HasSuffix(g.TrimPathPrefix, separator) { + g.TrimPathPrefix += separator + } + + path = strings.TrimPrefix(path, g.TrimPathPrefix) + outputPath = strings.TrimPrefix(outputPath, g.TrimPathPrefix) + } + // generate the gogoprotobuf protoc cmd := exec.Command("protoc", append(args, path)...) out, err := cmd.CombinedOutput() - if len(out) > 0 { - log.Print(string(out)) - } if err != nil { log.Println(strings.Join(cmd.Args, " ")) + log.Println(string(out)) log.Fatalf("Unable to generate protoc on %s: %v", p.PackageName, err) } @@ -397,9 +414,9 @@ func importOrder(deps map[string][]string) ([]string, error) { if len(remainingNodes) > 0 { return nil, fmt.Errorf("cycle: remaining nodes: %#v, remaining edges: %#v", remainingNodes, graph) } - for _, n := range sorted { - fmt.Println("topological order", n) - } + //for _, n := range sorted { + // fmt.Println("topological order", n) + //} return sorted, nil } diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go index 1d27f95806..c4cf66e744 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go @@ -109,8 +109,7 @@ func RewriteGeneratedGogoProtobufFile(name string, extractFn ExtractFunc, option // as being "optional" (they may be nil on the wire). This allows protobuf to serialize a map or slice and // properly discriminate between empty and nil (which is not possible in protobuf). // TODO: move into upstream gogo-protobuf once https://github.com/gogo/protobuf/issues/181 -// -// has agreement +// has agreement func rewriteOptionalMethods(decl ast.Decl, isOptional OptionalFunc) { switch t := decl.(type) { case *ast.FuncDecl: diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh index 223423b664..a511ef1728 100644 --- a/vendor/k8s.io/code-generator/generate-groups.sh +++ b/vendor/k8s.io/code-generator/generate-groups.sh @@ -25,7 +25,7 @@ if [ "$#" -lt 4 ] || [ "${1}" == "--help" ]; then cat <<EOF Usage: $(basename "$0") <generators> <output-package> <apis-package> <groups-versions> ... - <generators> the generators comma separated to run (deepcopy,defaulter,client,lister,informer) or "all". + <generators> the generators comma separated to run (deepcopy,defaulter,applyconfiguration,client,lister,informer) or "all". <output-package> the output package name (e.g. github.com/example/project/pkg/generated). <apis-package> the external types dir (e.g. github.com/example/api or github.com/example/project/pkg/apis). <groups-versions> the groups and their versions in the format "groupA:v1,v2 groupB:v1 groupC:v2", relative @@ -50,7 +50,7 @@ shift 4 # To support running this script from anywhere, first cd into this directory, # and then install with forced module mode on and fully qualified name.
cd "$(dirname "${0}")" - GO111MODULE=on go install k8s.io/code-generator/cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} + GO111MODULE=on go install k8s.io/code-generator/cmd/{applyconfiguration-gen,defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} ) # Go installs the above commands to get installed in $GOBIN if defined, and $GOPATH/bin otherwise: GOBIN="$(go env GOBIN)" @@ -77,13 +77,25 @@ if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then "$@" fi +if [ "${GENS}" = "all" ] || grep -qw "applyconfiguration" <<<"${GENS}"; then + echo "Generating apply configuration for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${APPLYCONFIGURATION_PKG_NAME:-applyconfiguration}" + "${gobin}/applyconfiguration-gen" \ + --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \ + --output-package "${OUTPUT_PKG}/${APPLYCONFIGURATION_PKG_NAME:-applyconfiguration}" \ + "$@" +fi + if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" + if [ "${GENS}" = "all" ] || grep -qw "applyconfiguration" <<<"${GENS}"; then + APPLY_CONFIGURATION_PACKAGE="${OUTPUT_PKG}/${APPLYCONFIGURATION_PKG_NAME:-applyconfiguration}" + fi "${gobin}/client-gen" \ --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" \ --input-base "" \ --input "$(codegen::join , "${FQ_APIS[@]}")" \ --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" \ + --apply-configuration-package "${APPLY_CONFIGURATION_PACKAGE:-}" \ "$@" fi diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh index 5dfda87923..3dd2c23166 100644 --- a/vendor/k8s.io/code-generator/generate-internal-groups.sh +++ b/vendor/k8s.io/code-generator/generate-internal-groups.sh @@ -54,18 +54,20 @@ shift 5 GO111MODULE=on go install k8s.io/code-generator/cmd/{defaulter-gen,conversion-gen,client-gen,lister-gen,informer-gen,deepcopy-gen,openapi-gen} ) +# Go installs the above commands to get installed in $GOBIN if defined, and $GOPATH/bin otherwise: +GOBIN="$(go env GOBIN)" +gobin="${GOBIN:-$(go env GOPATH)/bin}" + function codegen::join() { local IFS="$1"; shift; echo "$*"; } # enumerate group versions ALL_FQ_APIS=() # e.g. k8s.io/kubernetes/pkg/apis/apps k8s.io/api/apps/v1 -INT_FQ_APIS=() # e.g. k8s.io/kubernetes/pkg/apis/apps EXT_FQ_APIS=() # e.g. 
k8s.io/api/apps/v1 for GVs in ${GROUPS_WITH_VERSIONS}; do IFS=: read -r G Vs <<<"${GVs}" if [ -n "${INT_APIS_PKG}" ]; then ALL_FQ_APIS+=("${INT_APIS_PKG}/${G}") - INT_FQ_APIS+=("${INT_APIS_PKG}/${G}") fi # enumerate versions @@ -77,37 +79,28 @@ done if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then echo "Generating deepcopy funcs" - "${GOPATH}/bin/deepcopy-gen" \ + "${gobin}/deepcopy-gen" \ --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.deepcopy \ "$@" fi if [ "${GENS}" = "all" ] || grep -qw "defaulter" <<<"${GENS}"; then echo "Generating defaulters" - "${GOPATH}/bin/defaulter-gen" \ + "${gobin}/defaulter-gen" \ --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}")" -O zz_generated.defaults \ "$@" fi if [ "${GENS}" = "all" ] || grep -qw "conversion" <<<"${GENS}"; then echo "Generating conversions" - "${GOPATH}/bin/conversion-gen" \ + "${gobin}/conversion-gen" \ --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.conversion \ "$@" fi if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" - if [ -n "${INT_APIS_PKG}" ]; then - IFS=" " read -r -a APIS <<< "$(printf '%s/ ' "${INT_FQ_APIS[@]}")" - "${GOPATH}/bin/client-gen" \ - --clientset-name "${CLIENTSET_NAME_INTERNAL:-internalversion}" \ - --input-base "" \ - --input "$(codegen::join , "${APIS[@]}")" \ - --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" \ - "$@" - fi - "${GOPATH}/bin/client-gen" \ + "${gobin}/client-gen" \ --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" \ --input-base "" \ --input "$(codegen::join , "${EXT_FQ_APIS[@]}")" \ @@ -117,18 +110,17 @@ fi if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers" - "${GOPATH}/bin/lister-gen" \ - --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" \ + "${gobin}/lister-gen" \ + --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}")" \ --output-package "${OUTPUT_PKG}/listers" \ "$@" fi if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" - "${GOPATH}/bin/informer-gen" \ - --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" \ + "${gobin}/informer-gen" \ + --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}")" \ --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \ - --internal-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_INTERNAL:-internalversion}" \ --listers-package "${OUTPUT_PKG}/listers" \ --output-package "${OUTPUT_PKG}/informers" \ "$@" @@ -137,7 +129,7 @@ fi if [ "${GENS}" = "all" ] || grep -qw "openapi" <<<"${GENS}"; then echo "Generating OpenAPI definitions for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/openapi" declare -a OPENAPI_EXTRA_PACKAGES - "${GOPATH}/bin/openapi-gen" \ + "${gobin}/openapi-gen" \ --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}" "${OPENAPI_EXTRA_PACKAGES[@]+"${OPENAPI_EXTRA_PACKAGES[@]}"}")" \ --input-dirs "k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/version" \ --output-package "${OUTPUT_PKG}/openapi" \ diff --git a/vendor/k8s.io/code-generator/tools.go b/vendor/k8s.io/code-generator/tools.go index fb797be032..90b942b070 100644 --- a/vendor/k8s.io/code-generator/tools.go +++ b/vendor/k8s.io/code-generator/tools.go @@ -22,6 +22,7 
@@ limitations under the License. package codegenerator import ( + _ "k8s.io/code-generator/cmd/applyconfiguration-gen" _ "k8s.io/code-generator/cmd/client-gen" _ "k8s.io/code-generator/cmd/conversion-gen" _ "k8s.io/code-generator/cmd/deepcopy-gen" diff --git a/vendor/k8s.io/component-base/config/types.go b/vendor/k8s.io/component-base/config/types.go index aad605eeef..e1b9469d76 100644 --- a/vendor/k8s.io/component-base/config/types.go +++ b/vendor/k8s.io/component-base/config/types.go @@ -74,7 +74,7 @@ type LeaderElectionConfiguration struct { type DebuggingConfiguration struct { // enableProfiling enables profiling via web interface host:port/debug/pprof/ EnableProfiling bool - // enableContentionProfiling enables lock contention profiling, if + // enableContentionProfiling enables block profiling, if // enableProfiling is true. EnableContentionProfiling bool } diff --git a/vendor/k8s.io/component-base/config/v1alpha1/types.go b/vendor/k8s.io/component-base/config/v1alpha1/types.go index c9d05525d4..3c5f004f27 100644 --- a/vendor/k8s.io/component-base/config/v1alpha1/types.go +++ b/vendor/k8s.io/component-base/config/v1alpha1/types.go @@ -60,7 +60,7 @@ type LeaderElectionConfiguration struct { type DebuggingConfiguration struct { // enableProfiling enables profiling via web interface host:port/debug/pprof/ EnableProfiling *bool `json:"enableProfiling,omitempty"` - // enableContentionProfiling enables lock contention profiling, if + // enableContentionProfiling enables block profiling, if // enableProfiling is true. EnableContentionProfiling *bool `json:"enableContentionProfiling,omitempty"` } diff --git a/vendor/k8s.io/klog/v2/format.go b/vendor/k8s.io/klog/v2/format.go new file mode 100644 index 0000000000..63995ca6db --- /dev/null +++ b/vendor/k8s.io/klog/v2/format.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-logr/logr" +) + +// Format wraps a value of an arbitrary type and implement fmt.Stringer and +// logr.Marshaler for them. Stringer returns pretty-printed JSON. MarshalLog +// returns the original value with a type that has no special methods, in +// particular no MarshalLog or MarshalJSON. +// +// Wrapping values like that is useful when the value has a broken +// implementation of these special functions (for example, a type which +// inherits String from TypeMeta, but then doesn't re-implement String) or the +// implementation produces output that is less readable or unstructured (for +// example, the generated String functions for Kubernetes API types). 
+func Format(obj interface{}) interface{} { + return formatAny{Object: obj} +} + +type formatAny struct { + Object interface{} +} + +func (f formatAny) String() string { + var buffer strings.Builder + encoder := json.NewEncoder(&buffer) + encoder.SetIndent("", " ") + if err := encoder.Encode(&f.Object); err != nil { + return fmt.Sprintf("error marshaling %T to JSON: %v", f, err) + } + return buffer.String() +} + +func (f formatAny) MarshalLog() interface{} { + // Returning a pointer to a pointer ensures that zapr doesn't find a + // fmt.Stringer or logr.Marshaler when it checks the type of the + // value. It then falls back to reflection, which dumps the value being + // pointed to (JSON doesn't have pointers). + ptr := &f.Object + return &ptr +} + +var _ fmt.Stringer = formatAny{} +var _ logr.Marshaler = formatAny{} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index 1dc81a15fa..bcdf5f8ee1 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -18,6 +18,7 @@ package serialize import ( "bytes" + "encoding/json" "fmt" "strconv" @@ -196,11 +197,11 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { case textWriter: writeTextWriterValue(b, v) case fmt.Stringer: - writeStringValue(b, true, StringerToString(v)) + writeStringValue(b, StringerToString(v)) case string: - writeStringValue(b, true, v) + writeStringValue(b, v) case error: - writeStringValue(b, true, ErrorToString(v)) + writeStringValue(b, ErrorToString(v)) case logr.Marshaler: value := MarshalerToValue(v) // A marshaler that returns a string is useful for @@ -215,9 +216,9 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { // value directly. switch value := value.(type) { case string: - writeStringValue(b, true, value) + writeStringValue(b, value) default: - writeStringValue(b, false, f.AnyToString(value)) + f.formatAny(b, value) } case []byte: // In https://github.com/kubernetes/klog/pull/237 it was decided @@ -234,7 +235,7 @@ func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { b.WriteByte('=') b.WriteString(fmt.Sprintf("%+q", v)) default: - writeStringValue(b, false, f.AnyToString(v)) + f.formatAny(b, v) } } @@ -242,12 +243,25 @@ func KVFormat(b *bytes.Buffer, k, v interface{}) { Formatter{}.KVFormat(b, k, v) } -// AnyToString is the historic fallback formatter. -func (f Formatter) AnyToString(v interface{}) string { +// formatAny is the fallback formatter for a value. It supports a hook (for +// example, for YAML encoding) and itself uses JSON encoding. +func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { + b.WriteRune('=') if f.AnyToStringHook != nil { - return f.AnyToStringHook(v) + b.WriteString(f.AnyToStringHook(v)) + return + } + encoder := json.NewEncoder(b) + l := b.Len() + if err := encoder.Encode(v); err != nil { + // This shouldn't happen. We discard whatever the encoder + // wrote and instead dump an error string. + b.Truncate(l) + b.WriteString(fmt.Sprintf(`"<internal error: %v>"`, err)) + return } - return fmt.Sprintf("%+v", v) + // Remove trailing newline.
+ b.Truncate(b.Len() - 1) } // StringerToString converts a Stringer to a string, @@ -287,7 +301,7 @@ func ErrorToString(err error) (ret string) { } func writeTextWriterValue(b *bytes.Buffer, v textWriter) { - b.WriteRune('=') + b.WriteByte('=') defer func() { if err := recover(); err != nil { fmt.Fprintf(b, `"<panic: %v>"`, err) @@ -296,18 +310,13 @@ func writeTextWriterValue(b *bytes.Buffer, v textWriter) { v.WriteText(b) } -func writeStringValue(b *bytes.Buffer, quote bool, v string) { +func writeStringValue(b *bytes.Buffer, v string) { data := []byte(v) index := bytes.IndexByte(data, '\n') if index == -1 { b.WriteByte('=') - if quote { - // Simple string, quote quotation marks and non-printable characters. - b.WriteString(strconv.Quote(v)) - return - } - // Non-string with no line breaks. - b.WriteString(v) + // Simple string, quote quotation marks and non-printable characters. + b.WriteString(strconv.Quote(v)) return } diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go index ecd3f8b690..786af74bfd 100644 --- a/vendor/k8s.io/klog/v2/k8s_references.go +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -178,14 +178,14 @@ func (ks kobjSlice) process() (objs []interface{}, err string) { return objectRefs, "" } -var nilToken = []byte("<nil>") +var nilToken = []byte("null") func (ks kobjSlice) WriteText(out *bytes.Buffer) { s := reflect.ValueOf(ks.arg) switch s.Kind() { case reflect.Invalid: - // nil parameter, print as empty slice. - out.WriteString("[]") + // nil parameter, print as null. + out.Write(nilToken) return case reflect.Slice: // Okay, handle below. @@ -197,15 +197,15 @@ func (ks kobjSlice) WriteText(out *bytes.Buffer) { defer out.Write([]byte{']'}) for i := 0; i < s.Len(); i++ { if i > 0 { - out.Write([]byte{' '}) + out.Write([]byte{','}) } item := s.Index(i).Interface() if item == nil { out.Write(nilToken) } else if v, ok := item.(KMetadata); ok { - KObj(v).writeUnquoted(out) + KObj(v).WriteText(out) } else { - fmt.Fprintf(out, "<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item) + fmt.Fprintf(out, `"<KObjSlice needs a slice of values implementing KMetadata, got type %T>"`, item) return } } diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 466eeaf265..152f8a6bd6 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -1228,6 +1228,19 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the klog logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. type logBridge severity.Severity diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go index 16e34853af..3972cd5f4c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -37,8 +37,11 @@ limitations under the License. // # Atomicity // // Most of the operations are not atomic/thread-safe, except for -// [Replaceable.Replace] which can be performed while the objects -// are being read. +// [Replaceable.Replace] which can be performed while the objects are +// being read.
Specifically, `Get` methods are NOT thread-safe. Never +// call `Get()` without a lock on a multi-threaded environment, since +// it's usually performing updates to caches that will require write +// operations. // // # Etags // @@ -97,6 +100,13 @@ func (r Result[T]) Get() Result[T] { type Data[T any] interface { // Returns the cached data, as well as an "etag" to identify the // version of the cache, or an error if something happened. + // + // # Important note + // + // This method is NEVER thread-safe, never assume it is OK to + // call `Get()` without holding a proper mutex in a + // multi-threaded environment, especially since `Get()` will + // usually update the cache and perform write operations. Get() Result[T] } @@ -249,6 +259,13 @@ type Replaceable[T any] struct { // previously had returned a success, that success will be returned // instead. If the cache fails but we never returned a success, that // failure is returned. +// +// # Important note +// +// As all implementations of Get, this implementation is NOT +// thread-safe. Please properly lock a mutex before calling this method +// if you are in a multi-threaded environment, since this method will +// update the cache and perform write operations. func (c *Replaceable[T]) Get() Result[T] { result := (*c.cache.Load()).Get() if result.Err != nil && c.result != nil && c.result.Err == nil { diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index a0b07a6d78..187eb5d8c5 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -65,6 +65,11 @@ func durationToMilliseconds(timeDuration time.Duration) int64 { } type traceItem interface { + // rLock must be called before invoking time or writeItem. + rLock() + // rUnlock must be called after processing the item is complete. + rUnlock() + // time returns when the trace was recorded as completed. time() time.Time // writeItem outputs the traceItem to the buffer. If stepThreshold is non-nil, only output the @@ -79,6 +84,10 @@ type traceStep struct { fields []Field } +// rLock doesn't need to do anything because traceStep instances are immutable. +func (s traceStep) rLock() {} +func (s traceStep) rUnlock() {} + func (s traceStep) time() time.Time { return s.stepTime } @@ -106,6 +115,14 @@ type Trace struct { traceItems []traceItem } +func (t *Trace) rLock() { + t.lock.RLock() +} + +func (t *Trace) rUnlock() { + t.lock.RUnlock() +} + func (t *Trace) time() time.Time { if t.endTime != nil { return *t.endTime @@ -231,8 +248,10 @@ func (t *Trace) logTrace() { func (t *Trace) writeTraceSteps(b *bytes.Buffer, formatter string, stepThreshold *time.Duration) { lastStepTime := t.startTime for _, stepOrTrace := range t.traceItems { + stepOrTrace.rLock() stepOrTrace.writeItem(b, formatter, lastStepTime, stepThreshold) lastStepTime = stepOrTrace.time() + stepOrTrace.rUnlock() } } diff --git a/vendor/knative.dev/pkg/apis/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS index 82f0814ccd..13014203fc 100644 --- a/vendor/knative.dev/pkg/apis/OWNERS +++ b/vendor/knative.dev/pkg/apis/OWNERS @@ -1,9 +1,15 @@ # The OWNERS file is used by prow to automatically merge approved PRs. 
approvers: -- mattmoor -- vaikas -- n3wscott +- technical-oversight-committee +- serving-wg-leads +- eventing-wg-leads reviewers: -- whaught +- serving-writers +- eventing-writers +- eventing-reviewers +- serving-reviewers + +options: + no_parent_owners: true diff --git a/vendor/knative.dev/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS index f6267f14fe..af1eb05dac 100644 --- a/vendor/knative.dev/pkg/apis/duck/OWNERS +++ b/vendor/knative.dev/pkg/apis/duck/OWNERS @@ -1,8 +1,8 @@ # The OWNERS file is used by prow to automatically merge approved PRs. approvers: -- mattmoor -- vaikas +- eventing-wg-leads reviewers: -- whaught +- eventing-reviewers +- eventing-writers diff --git a/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go b/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go index 83e746a001..bdcb994fac 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/addressable_types.go @@ -38,7 +38,16 @@ import ( // typically stored in the object's `status`, as this information may // be generated by the controller. type Addressable struct { + // Name is the name of the address. + // +optional + Name *string `json:"name,omitempty"` + URL *apis.URL `json:"url,omitempty"` + + // CACerts is the Certification Authority (CA) certificates in PEM format + // according to https://www.rfc-editor.org/rfc/rfc7468. + // +optional + CACerts *string `json:"CACerts,omitempty"` } var ( @@ -62,7 +71,15 @@ type AddressableType struct { // AddressStatus shows how we expect folks to embed Addressable in // their Status field. type AddressStatus struct { + // Address is a single Addressable address. + // If Addresses is present, Address will be ignored by clients. + // +optional Address *Addressable `json:"address,omitempty"` + + // Addresses is a list of addresses for different protocols (HTTP and HTTPS) + // If Addresses is present, Address must be ignored by clients. + // +optional + Addresses []Addressable `json:"addresses,omitempty"` } // Verify AddressableType resources meet duck contracts. @@ -89,9 +106,11 @@ func (a *Addressable) ConvertFrom(ctx context.Context, from apis.Convertible) er // Populate implements duck.Populatable func (t *AddressableType) Populate() { + name := "http" t.Status = AddressStatus{ - &Addressable{ + Address: &Addressable{ // Populate ALL fields + Name: &name, URL: &apis.URL{ Scheme: "http", Host: "foo.com", diff --git a/vendor/knative.dev/pkg/apis/duck/v1/destination.go b/vendor/knative.dev/pkg/apis/duck/v1/destination.go index 87f0983fd0..15638f4018 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/destination.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/destination.go @@ -31,6 +31,13 @@ type Destination struct { // URI can be an absolute URL(non-empty scheme and non-empty host) pointing to the target or a relative URI. Relative URIs will be resolved using the base URI retrieved from Ref. // +optional URI *apis.URL `json:"uri,omitempty"` + + // CACerts are Certification Authority (CA) certificates in PEM format + // according to https://www.rfc-editor.org/rfc/rfc7468. + // If set, these CAs are appended to the set of CAs provided + // by the Addressable target, if any. 
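// For illustration only (hypothetical values, not part of the vendored file),
// a Destination that carries its own CA bundle could be specified as:
//
//	uri: https://builds.internal.example.com
//	CACerts: |
//	  -----BEGIN CERTIFICATE-----
//	  ...
//	  -----END CERTIFICATE-----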
+ // +optional + CACerts *string `json:"CACerts,omitempty"` } // Validate the Destination has all the necessary fields and check the @@ -73,6 +80,10 @@ func (d *Destination) GetRef() *KReference { } func (d *Destination) SetDefaults(ctx context.Context) { + if d == nil { + return + } + if d.Ref != nil && d.Ref.Namespace == "" { d.Ref.Namespace = apis.ParentMeta(ctx).Namespace } diff --git a/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go b/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go index a0b169d6f8..c723c147a9 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/knative_reference.go @@ -49,6 +49,10 @@ type KReference struct { // Note: This API is EXPERIMENTAL and might break anytime. For more details: https://github.com/knative/eventing/issues/5086 // +optional Group string `json:"group,omitempty"` + + // Address points to a specific Address Name. + // +optional + Address *string `json:"address,omitempty"` } func (kr *KReference) Validate(ctx context.Context) *apis.FieldError { diff --git a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go index 96638e5799..83cccfd3b8 100644 --- a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go +++ b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go @@ -34,6 +34,13 @@ func (in *AddressStatus) DeepCopyInto(out *AddressStatus) { *out = new(Addressable) (*in).DeepCopyInto(*out) } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]Addressable, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -50,11 +57,21 @@ func (in *AddressStatus) DeepCopy() *AddressStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Addressable) DeepCopyInto(out *Addressable) { *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } if in.URL != nil { in, out := &in.URL, &out.URL *out = new(apis.URL) (*in).DeepCopyInto(*out) } + if in.CACerts != nil { + in, out := &in.CACerts, &out.CACerts + *out = new(string) + **out = **in + } return } @@ -332,13 +349,18 @@ func (in *Destination) DeepCopyInto(out *Destination) { if in.Ref != nil { in, out := &in.Ref, &out.Ref *out = new(KReference) - **out = **in + (*in).DeepCopyInto(*out) } if in.URI != nil { in, out := &in.URI, &out.URI *out = new(apis.URL) (*in).DeepCopyInto(*out) } + if in.CACerts != nil { + in, out := &in.CACerts, &out.CACerts + *out = new(string) + **out = **in + } return } @@ -355,6 +377,11 @@ func (in *Destination) DeepCopy() *Destination { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KReference) DeepCopyInto(out *KReference) { *out = *in + if in.Address != nil { + in, out := &in.Address, &out.Address + *out = new(string) + **out = **in + } return } diff --git a/vendor/knative.dev/pkg/configmap/OWNERS b/vendor/knative.dev/pkg/configmap/OWNERS deleted file mode 100644 index c3ff67e25b..0000000000 --- a/vendor/knative.dev/pkg/configmap/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. 
- -approvers: -- pkg-configmap-writers -reviewers: -- pkg-configmap-reviewers diff --git a/vendor/knative.dev/pkg/kmeta/OWNERS b/vendor/knative.dev/pkg/kmeta/OWNERS deleted file mode 100644 index a1e0f822aa..0000000000 --- a/vendor/knative.dev/pkg/kmeta/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- mattmoor -- vagababov diff --git a/vendor/knative.dev/pkg/logging/OWNERS b/vendor/knative.dev/pkg/logging/OWNERS deleted file mode 100644 index f51b91be77..0000000000 --- a/vendor/knative.dev/pkg/logging/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- mdemirhan -- n3wscott -- yanweiguo diff --git a/vendor/knative.dev/pkg/metrics/OWNERS b/vendor/knative.dev/pkg/metrics/OWNERS deleted file mode 100644 index 7cd699f27e..0000000000 --- a/vendor/knative.dev/pkg/metrics/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- mdemirhan -- yanweiguo - -reviewers: -- mdemirhan -- yanweiguo -- skonto diff --git a/vendor/knative.dev/pkg/metrics/config.go b/vendor/knative.dev/pkg/metrics/config.go index 3fe3c1737d..ce5e7f875f 100644 --- a/vendor/knative.dev/pkg/metrics/config.go +++ b/vendor/knative.dev/pkg/metrics/config.go @@ -41,19 +41,21 @@ const ( DomainEnv = "METRICS_DOMAIN" // The following keys are used to configure metrics reporting. - // See https://github.com/knative/serving/blob/main/config/config-observability.yaml + // See https://github.com/knative/serving/blob/main/config/core/configmaps/observability.yaml // for details. collectorAddressKey = "metrics.opencensus-address" collectorSecureKey = "metrics.opencensus-require-tls" reportingPeriodKey = "metrics.reporting-period-seconds" - defaultBackendEnvName = "DEFAULT_METRICS_BACKEND" - defaultPrometheusPort = 9090 - maxPrometheusPort = 65535 - minPrometheusPort = 1024 - defaultPrometheusHost = "0.0.0.0" - prometheusPortEnvName = "METRICS_PROMETHEUS_PORT" - prometheusHostEnvName = "METRICS_PROMETHEUS_HOST" + defaultBackendEnvName = "DEFAULT_METRICS_BACKEND" + defaultPrometheusPort = 9090 + defaultPrometheusReportingPeriod = 5 + defaultOpenCensusReportingPeriod = 60 + maxPrometheusPort = 65535 + minPrometheusPort = 1024 + defaultPrometheusHost = "0.0.0.0" + prometheusPortEnvName = "METRICS_PROMETHEUS_PORT" + prometheusHostEnvName = "METRICS_PROMETHEUS_HOST" ) var ( @@ -206,9 +208,9 @@ func createMetricsConfig(_ context.Context, ops ExporterOptions) (*metricsConfig } else { switch mc.backendDestination { case openCensus: - mc.reportingPeriod = time.Minute + mc.reportingPeriod = defaultOpenCensusReportingPeriod * time.Second case prometheus: - mc.reportingPeriod = 5 * time.Second + mc.reportingPeriod = defaultPrometheusReportingPeriod * time.Second } } return &mc, nil diff --git a/vendor/knative.dev/pkg/metrics/config_observability.go b/vendor/knative.dev/pkg/metrics/config_observability.go index b6affd2936..e766b071b6 100644 --- a/vendor/knative.dev/pkg/metrics/config_observability.go +++ b/vendor/knative.dev/pkg/metrics/config_observability.go @@ -71,6 +71,10 @@ type ObservabilityConfig struct { // OpenCensus. "None" disables all backends. RequestMetricsBackend string + // RequestMetricsReportingPeriodSeconds specifies the request metrics reporting period in sec at queue proxy, eg 1. + // If a zero or negative value is passed the default reporting period is used (10 secs). 
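// For illustration only (hypothetical values, not part of the vendored file),
// the related keys in the observability ConfigMap parsed below would look like:
//
//	metrics.request-metrics-backend-destination: prometheus
//	metrics.request-metrics-reporting-period-seconds: "5"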
+ RequestMetricsReportingPeriodSeconds int + // EnableProfiling indicates whether it is allowed to retrieve runtime profiling data from // the pods via an HTTP server in the format expected by the pprof visualization tool. EnableProfiling bool @@ -114,6 +118,12 @@ func NewObservabilityConfigFromConfigMap(configMap *corev1.ConfigMap) (*Observab return oc, nil } + defaultRequestMetricsReportingPeriod, err := getDefaultRequestMetricsReportingPeriod(configMap.Data) + if err != nil { + return nil, err + } + oc.RequestMetricsReportingPeriodSeconds = defaultRequestMetricsReportingPeriod + if err := cm.Parse(configMap.Data, cm.AsBool("logging.enable-var-log-collection", &oc.EnableVarLogCollection), cm.AsString("logging.revision-url-template", &oc.LoggingURLTemplate), @@ -121,6 +131,7 @@ func NewObservabilityConfigFromConfigMap(configMap *corev1.ConfigMap) (*Observab cm.AsBool(EnableReqLogKey, &oc.EnableRequestLog), cm.AsBool(EnableProbeReqLogKey, &oc.EnableProbeRequestLog), cm.AsString("metrics.request-metrics-backend-destination", &oc.RequestMetricsBackend), + cm.AsInt("metrics.request-metrics-reporting-period-seconds", &oc.RequestMetricsReportingPeriodSeconds), cm.AsBool("profiling.enable", &oc.EnableProfiling), cm.AsString("metrics.opencensus-address", &oc.MetricsCollectorAddress), ); err != nil { @@ -163,3 +174,27 @@ func ConfigMapName() string { } return "config-observability" } + +// Use the same as `metrics.reporting-period-seconds` for the default +// of `metrics.request-metrics-reporting-period-seconds` +func getDefaultRequestMetricsReportingPeriod(data map[string]string) (int, error) { + // Default backend is prometheus + period := defaultPrometheusReportingPeriod + if repStr := data[reportingPeriodKey]; repStr != "" { + repInt, err := strconv.Atoi(repStr) + if err != nil { + return -1, fmt.Errorf("invalid %s value %q", reportingPeriodKey, repStr) + } + period = repInt + } else { + if raw, ok := data["metrics.request-metrics-backend-destination"]; ok { + switch metricsBackend(raw) { + case prometheus: + period = defaultPrometheusReportingPeriod + case openCensus: + period = defaultOpenCensusReportingPeriod + } + } + } + return period, nil +} diff --git a/vendor/knative.dev/pkg/metrics/opencensus_exporter.go b/vendor/knative.dev/pkg/metrics/opencensus_exporter.go index eaeac4a566..59e33ab099 100644 --- a/vendor/knative.dev/pkg/metrics/opencensus_exporter.go +++ b/vendor/knative.dev/pkg/metrics/opencensus_exporter.go @@ -99,7 +99,7 @@ func getCredentials(component string, secret *corev1.Secret, logger *zap.Sugared return nil } return credentials.NewTLS(&tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: tls.VersionTLS13, GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { cert, err := tls.X509KeyPair(secret.Data["client-cert.pem"], secret.Data["client-key.pem"]) if err != nil { diff --git a/vendor/knative.dev/pkg/metrics/prometheus_exporter.go b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go index aec6c09a09..5f11c12e8d 100644 --- a/vendor/knative.dev/pkg/metrics/prometheus_exporter.go +++ b/vendor/knative.dev/pkg/metrics/prometheus_exporter.go @@ -20,6 +20,7 @@ import ( "net/http" "strconv" "sync" + "time" prom "contrib.go.opencensus.io/exporter/prometheus" "go.opencensus.io/resource" @@ -82,10 +83,10 @@ func startNewPromSrv(e *prom.Exporter, host string, port int) *http.Server { if curPromSrv != nil { curPromSrv.Close() } - //nolint:gosec curPromSrv = &http.Server{ - Addr: host + ":" + strconv.Itoa(port), - Handler: sm, + Addr: host + ":" + 
strconv.Itoa(port), + Handler: sm, + ReadHeaderTimeout: time.Minute, //https://medium.com/a-journey-with-go/go-understand-and-mitigate-slowloris-attack-711c1b1403f6 } return curPromSrv } diff --git a/vendor/modules.txt b/vendor/modules.txt index 8b487b915b..8520b2235e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -91,7 +91,7 @@ github.com/docker/docker/pkg/homedir ## explicit; go 1.18 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials -# github.com/emicklei/go-restful/v3 v3.9.0 +# github.com/emicklei/go-restful/v3 v3.10.2 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log @@ -185,13 +185,13 @@ github.com/go-logfmt/logfmt ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr -# github.com/go-logr/zapr v1.2.3 +# github.com/go-logr/zapr v1.2.4 ## explicit; go 1.16 github.com/go-logr/zapr # github.com/go-openapi/jsonpointer v0.19.6 ## explicit; go 1.13 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.20.1 +# github.com/go-openapi/jsonreference v0.20.2 ## explicit; go 1.13 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal @@ -366,7 +366,7 @@ github.com/onsi/gomega/types # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.1.0-rc3 +# github.com/opencontainers/image-spec v1.1.0-rc4 ## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 @@ -417,7 +417,7 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/tektoncd/pipeline v0.47.4 +# github.com/tektoncd/pipeline v0.50.5 ## explicit; go 1.19 github.com/tektoncd/pipeline/pkg/apis/config github.com/tektoncd/pipeline/pkg/apis/pipeline @@ -588,10 +588,10 @@ golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# gomodules.xyz/jsonpatch/v2 v2.2.0 -## explicit; go 1.12 +# gomodules.xyz/jsonpatch/v2 v2.3.0 +## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.122.0 +# google.golang.org/api v0.128.0 ## explicit; go 1.19 google.golang.org/api/support/bundler # google.golang.org/appengine v1.6.7 @@ -714,8 +714,8 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.26.9 -## explicit; go 1.19 +# k8s.io/api v0.27.11 +## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -738,6 +738,7 @@ k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 k8s.io/api/batch/v1beta1 k8s.io/api/certificates/v1 +k8s.io/api/certificates/v1alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 @@ -763,20 +764,20 @@ k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 -k8s.io/api/resource/v1alpha1 +k8s.io/api/resource/v1alpha2 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.26.9 -## explicit; go 1.19 +# k8s.io/apiextensions-apiserver v0.27.11 +## explicit; go 1.20 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 
-# k8s.io/apimachinery v0.26.9 -## explicit; go 1.19 +# k8s.io/apimachinery v0.27.11 +## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -809,6 +810,7 @@ k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/managedfields +k8s.io/apimachinery/pkg/util/managedfields/internal k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net @@ -825,8 +827,8 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.26.9 -## explicit; go 1.19 +# k8s.io/client-go v0.27.11 +## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -841,6 +843,7 @@ k8s.io/client-go/applyconfigurations/autoscaling/v2beta2 k8s.io/client-go/applyconfigurations/batch/v1 k8s.io/client-go/applyconfigurations/batch/v1beta1 k8s.io/client-go/applyconfigurations/certificates/v1 +k8s.io/client-go/applyconfigurations/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 k8s.io/client-go/applyconfigurations/coordination/v1beta1 @@ -867,7 +870,7 @@ k8s.io/client-go/applyconfigurations/policy/v1beta1 k8s.io/client-go/applyconfigurations/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 -k8s.io/client-go/applyconfigurations/resource/v1alpha1 +k8s.io/client-go/applyconfigurations/resource/v1alpha2 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 @@ -898,6 +901,7 @@ k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 k8s.io/client-go/kubernetes/typed/batch/v1 k8s.io/client-go/kubernetes/typed/batch/v1beta1 k8s.io/client-go/kubernetes/typed/certificates/v1 +k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 @@ -922,7 +926,7 @@ k8s.io/client-go/kubernetes/typed/policy/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 -k8s.io/client-go/kubernetes/typed/resource/v1alpha1 +k8s.io/client-go/kubernetes/typed/resource/v1alpha2 k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 @@ -947,6 +951,7 @@ k8s.io/client-go/restmapper k8s.io/client-go/testing k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache +k8s.io/client-go/tools/cache/synctrack k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest @@ -965,9 +970,12 @@ k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.26.9 -## explicit; go 1.19 +# k8s.io/code-generator v0.27.11 +## explicit; go 1.20 k8s.io/code-generator +k8s.io/code-generator/cmd/applyconfiguration-gen +k8s.io/code-generator/cmd/applyconfiguration-gen/args 
+k8s.io/code-generator/cmd/applyconfiguration-gen/generators k8s.io/code-generator/cmd/client-gen k8s.io/code-generator/cmd/client-gen/args k8s.io/code-generator/cmd/client-gen/generators @@ -1000,8 +1008,8 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.26.9 -## explicit; go 1.19 +# k8s.io/component-base v0.27.11 +## explicit; go 1.20 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 # k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 @@ -1016,7 +1024,7 @@ k8s.io/gengo/generator k8s.io/gengo/namer k8s.io/gengo/parser k8s.io/gengo/types -# k8s.io/klog/v2 v2.90.1 +# k8s.io/klog/v2 v2.100.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -1024,8 +1032,8 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a -## explicit; go 1.18 +# k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 +## explicit; go 1.19 k8s.io/kube-openapi/cmd/openapi-gen/args k8s.io/kube-openapi/pkg/builder3/util k8s.io/kube-openapi/pkg/cached @@ -1042,10 +1050,10 @@ k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubectl v0.26.9 -## explicit; go 1.19 +# k8s.io/kubectl v0.27.11 +## explicit; go 1.20 k8s.io/kubectl/pkg/scheme -# k8s.io/utils v0.0.0-20230209194617-a36077c30491 +# k8s.io/utils v0.0.0-20230505201702-9f6742963106 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1056,7 +1064,7 @@ k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace -# knative.dev/pkg v0.0.0-20230221145627-8efb3485adcf +# knative.dev/pkg v0.0.0-20231011201526-df28feae6d34 ## explicit; go 1.18 knative.dev/pkg/apis knative.dev/pkg/apis/duck @@ -1074,8 +1082,8 @@ knative.dev/pkg/metrics/metricskey knative.dev/pkg/signals knative.dev/pkg/tracker knative.dev/pkg/webhook/resourcesemantics -# sigs.k8s.io/controller-runtime v0.14.6 -## explicit; go 1.19 +# sigs.k8s.io/controller-runtime v0.15.3 +## explicit; go 1.20 sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/cache/internal sigs.k8s.io/controller-runtime/pkg/certwatcher @@ -1096,8 +1104,8 @@ sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics sigs.k8s.io/controller-runtime/pkg/internal/field/selector sigs.k8s.io/controller-runtime/pkg/internal/httpserver sigs.k8s.io/controller-runtime/pkg/internal/log -sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder +sigs.k8s.io/controller-runtime/pkg/internal/source sigs.k8s.io/controller-runtime/pkg/leaderelection sigs.k8s.io/controller-runtime/pkg/log sigs.k8s.io/controller-runtime/pkg/log/zap @@ -1108,10 +1116,8 @@ sigs.k8s.io/controller-runtime/pkg/predicate sigs.k8s.io/controller-runtime/pkg/ratelimiter sigs.k8s.io/controller-runtime/pkg/reconcile sigs.k8s.io/controller-runtime/pkg/recorder -sigs.k8s.io/controller-runtime/pkg/runtime/inject sigs.k8s.io/controller-runtime/pkg/scheme sigs.k8s.io/controller-runtime/pkg/source -sigs.k8s.io/controller-runtime/pkg/source/internal sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics @@ -1122,6 +1128,7 @@ sigs.k8s.io/json/internal/golang/encoding/json # 
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath +sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index bcb1141a50..7600387047 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -19,10 +19,11 @@ package cache import ( "context" "fmt" - "reflect" + "net/http" "time" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -37,7 +38,10 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) -var log = logf.RuntimeLog.WithName("object-cache") +var ( + log = logf.RuntimeLog.WithName("object-cache") + defaultSyncPeriod = 10 * time.Hour +) // Cache knows how to load Kubernetes objects, fetch informers to request // to receive events for Kubernetes objects (at a low-level), @@ -98,310 +102,157 @@ type Informer interface { HasSynced() bool } -// ObjectSelector is an alias name of internal.Selector. -type ObjectSelector internal.Selector - -// SelectorsByObject associate a client.Object's GVK to a field/label selector. -// There is also `DefaultSelector` to set a global default (which will be overridden by -// a more specific setting here, if any). -type SelectorsByObject map[client.Object]ObjectSelector - // Options are the optional arguments for creating a new InformersMap object. type Options struct { + // HTTPClient is the http client to use for the REST client + HTTPClient *http.Client + // Scheme is the scheme to use for mapping objects to GroupVersionKinds Scheme *runtime.Scheme // Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources Mapper meta.RESTMapper - // Resync is the base frequency the informers are resynced. - // Defaults to defaultResyncTime. - // A 10 percent jitter will be added to the Resync period between informers - // So that all informers will not send list requests simultaneously. - Resync *time.Duration - - // Namespace restricts the cache's ListWatch to the desired namespace + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // + // This applies to all controllers. + // + // A period sync happens for two reasons: + // 1. To insure against a bug in the controller that causes an object to not + // be requeued, when it otherwise should be requeued. + // 2. To insure against an unknown bug in controller-runtime, or its dependencies, + // that causes an object to not be requeued, when it otherwise should be + // requeued, or to be removed from the queue, when it otherwise should not + // be removed. + // + // If you want + // 1. to insure against missed watch events, or + // 2. 
to poll services that cannot be watched, + // then we recommend that, instead of changing the default period, the + // controller requeue, with a constant duration `t`, whenever the controller + // is "done" with an object, and would otherwise not requeue it, i.e., we + // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, + // instead of `reconcile.Result{}`. + SyncPeriod *time.Duration + + // Namespaces restricts the cache's ListWatch to the desired namespaces // Default watches all namespaces - Namespace string + Namespaces []string - // SelectorsByObject restricts the cache's ListWatch to the desired - // fields per GVK at the specified object, the map's value must implement - // Selector [1] using for example a Set [2] - // [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector - // [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set - SelectorsByObject SelectorsByObject + // DefaultLabelSelector will be used as a label selectors for all object types + // unless they have a more specific selector set in ByObject. + DefaultLabelSelector labels.Selector - // DefaultSelector will be used as selectors for all object types - // that do not have a selector in SelectorsByObject defined. - DefaultSelector ObjectSelector + // DefaultFieldSelector will be used as a field selectors for all object types + // unless they have a more specific selector set in ByObject. + DefaultFieldSelector fields.Selector - // UnsafeDisableDeepCopyByObject indicates not to deep copy objects during get or - // list objects per GVK at the specified object. + // DefaultTransform will be used as transform for all object types + // unless they have a more specific transform set in ByObject. + DefaultTransform toolscache.TransformFunc + + // ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object. + ByObject map[client.Object]ByObject + + // UnsafeDisableDeepCopy indicates not to deep copy objects during get or + // list objects for EVERY object. // Be very careful with this, when enabled you must DeepCopy any object before mutating it, // otherwise you will mutate the object in the cache. - UnsafeDisableDeepCopyByObject DisableDeepCopyByObject + // + // This is a global setting for all objects, and can be overridden by the ByObject setting. + UnsafeDisableDeepCopy *bool +} - // TransformByObject is a map from GVKs to transformer functions which +// ByObject offers more fine-grained control over the cache's ListWatch by object. +type ByObject struct { + // Label represents a label selector for the object. + Label labels.Selector + + // Field represents a field selector for the object. + Field fields.Selector + + // Transform is a map from objects to transformer functions which // get applied when objects of the transformation are about to be committed // to cache. // // This function is called both for new objects to enter the cache, - // and for updated objects. - TransformByObject TransformByObject + // and for updated objects. + Transform toolscache.TransformFunc - // DefaultTransform is the transform used for all GVKs which do - // not have an explicit transform func set in TransformByObject - DefaultTransform toolscache.TransformFunc + // UnsafeDisableDeepCopy indicates not to deep copy objects during get or + // list objects per GVK at the specified object. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. 
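// A minimal sketch (hypothetical objects, assuming a started cache c, a
// context ctx, and an ObjectKey key; not part of the vendored file) of the
// discipline this requires from callers:
//
//	cm := &corev1.ConfigMap{}
//	_ = c.Get(ctx, key, cm)     // cm shares data with the cached object
//	cm = cm.DeepCopy()          // copy first...
//	cm.Data["patched"] = "true" // ...then mutate only the copy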
+ UnsafeDisableDeepCopy *bool } -var defaultResyncTime = 10 * time.Hour +// NewCacheFunc - Function for creating a new cache from the options and a rest config. +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) // New initializes and returns a new Cache. func New(config *rest.Config, opts Options) (Cache, error) { - opts, err := defaultOpts(config, opts) - if err != nil { - return nil, err + if len(opts.Namespaces) == 0 { + opts.Namespaces = []string{metav1.NamespaceAll} } - selectorsByGVK, err := convertToByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme) - if err != nil { - return nil, err + if len(opts.Namespaces) > 1 { + return newMultiNamespaceCache(config, opts) } - disableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(opts.UnsafeDisableDeepCopyByObject, opts.Scheme) - if err != nil { - return nil, err - } - transformByGVK, err := convertToByGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme) - if err != nil { - return nil, err - } - transformByObj := internal.TransformFuncByObjectFromMap(transformByGVK) - - internalSelectorsByGVK := internal.SelectorsByGVK{} - for gvk, selector := range selectorsByGVK { - internalSelectorsByGVK[gvk] = internal.Selector(selector) - } - - im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, internalSelectorsByGVK, disableDeepCopyByGVK, transformByObj) - return &informerCache{InformersMap: im}, nil -} -// BuilderWithOptions returns a Cache constructor that will build a cache -// honoring the options argument, this is useful to specify options like -// SelectorsByObject -// WARNING: If SelectorsByObject is specified, filtered out resources are not -// returned. -// WARNING: If UnsafeDisableDeepCopy is enabled, you must DeepCopy any object -// returned from cache get/list before mutating it. 
-func BuilderWithOptions(options Options) NewCacheFunc { - return func(config *rest.Config, inherited Options) (Cache, error) { - var err error - inherited, err = defaultOpts(config, inherited) - if err != nil { - return nil, err - } - options, err = defaultOpts(config, options) - if err != nil { - return nil, err - } - combined, err := options.inheritFrom(inherited) - if err != nil { - return nil, err - } - return New(config, *combined) - } -} - -func (options Options) inheritFrom(inherited Options) (*Options, error) { - var ( - combined Options - err error - ) - combined.Scheme = combineScheme(inherited.Scheme, options.Scheme) - combined.Mapper = selectMapper(inherited.Mapper, options.Mapper) - combined.Resync = selectResync(inherited.Resync, options.Resync) - combined.Namespace = selectNamespace(inherited.Namespace, options.Namespace) - combined.SelectorsByObject, combined.DefaultSelector, err = combineSelectors(inherited, options, combined.Scheme) - if err != nil { - return nil, err - } - combined.UnsafeDisableDeepCopyByObject, err = combineUnsafeDeepCopy(inherited, options, combined.Scheme) - if err != nil { - return nil, err - } - combined.TransformByObject, combined.DefaultTransform, err = combineTransforms(inherited, options, combined.Scheme) + opts, err := defaultOpts(config, opts) if err != nil { return nil, err } - return &combined, nil -} - -func combineScheme(schemes ...*runtime.Scheme) *runtime.Scheme { - var out *runtime.Scheme - for _, sch := range schemes { - if sch == nil { - continue - } - for gvk, t := range sch.AllKnownTypes() { - if out == nil { - out = runtime.NewScheme() - } - out.AddKnownTypeWithName(gvk, reflect.New(t).Interface().(runtime.Object)) - } - } - return out -} - -func selectMapper(def, override meta.RESTMapper) meta.RESTMapper { - if override != nil { - return override - } - return def -} - -func selectResync(def, override *time.Duration) *time.Duration { - if override != nil { - return override - } - return def -} - -func selectNamespace(def, override string) string { - if override != "" { - return override - } - return def -} -func combineSelectors(inherited, options Options, scheme *runtime.Scheme) (SelectorsByObject, ObjectSelector, error) { - // Selectors are combined via logical AND. - // - Combined label selector is a union of the selectors requirements from both sets of options. - // - Combined field selector uses fields.AndSelectors with the combined list of non-nil field selectors - // defined in both sets of options. 
- // - // There is a bunch of complexity here because we need to convert to SelectorsByGVK - // to be able to match keys between options and inherited and then convert back to SelectorsByObject - optionsSelectorsByGVK, err := convertToByGVK(options.SelectorsByObject, options.DefaultSelector, scheme) - if err != nil { - return nil, ObjectSelector{}, err - } - inheritedSelectorsByGVK, err := convertToByGVK(inherited.SelectorsByObject, inherited.DefaultSelector, inherited.Scheme) - if err != nil { - return nil, ObjectSelector{}, err - } - - for gvk, inheritedSelector := range inheritedSelectorsByGVK { - optionsSelectorsByGVK[gvk] = combineSelector(inheritedSelector, optionsSelectorsByGVK[gvk]) - } - return convertToByObject(optionsSelectorsByGVK, scheme) -} - -func combineSelector(selectors ...ObjectSelector) ObjectSelector { - ls := make([]labels.Selector, 0, len(selectors)) - fs := make([]fields.Selector, 0, len(selectors)) - for _, s := range selectors { - ls = append(ls, s.Label) - fs = append(fs, s.Field) - } - return ObjectSelector{ - Label: combineLabelSelectors(ls...), - Field: combineFieldSelectors(fs...), - } -} - -func combineLabelSelectors(ls ...labels.Selector) labels.Selector { - var combined labels.Selector - for _, l := range ls { - if l == nil { - continue - } - if combined == nil { - combined = labels.NewSelector() - } - reqs, _ := l.Requirements() - combined = combined.Add(reqs...) - } - return combined -} - -func combineFieldSelectors(fs ...fields.Selector) fields.Selector { - nonNil := fs[:0] - for _, f := range fs { - if f == nil { - continue - } - nonNil = append(nonNil, f) - } - if len(nonNil) == 0 { - return nil - } - if len(nonNil) == 1 { - return nonNil[0] - } - return fields.AndSelectors(nonNil...) -} - -func combineUnsafeDeepCopy(inherited, options Options, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { - // UnsafeDisableDeepCopyByObject is combined via precedence. Only if a value for a particular GVK is unset - // in options will a value from inherited be used. - optionsDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(options.UnsafeDisableDeepCopyByObject, options.Scheme) + byGVK, err := convertToInformerOptsByGVK(opts.ByObject, opts.Scheme) if err != nil { return nil, err } - inheritedDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(inherited.UnsafeDisableDeepCopyByObject, inherited.Scheme) - if err != nil { - return nil, err - } - - for gvk, inheritedDeepCopy := range inheritedDisableDeepCopyByGVK { - if _, ok := optionsDisableDeepCopyByGVK[gvk]; !ok { - if optionsDisableDeepCopyByGVK == nil { - optionsDisableDeepCopyByGVK = map[schema.GroupVersionKind]bool{} - } - optionsDisableDeepCopyByGVK[gvk] = inheritedDeepCopy - } - } - return convertToDisableDeepCopyByObject(optionsDisableDeepCopyByGVK, scheme) + // Set the default selector and transform. 
+ byGVK[schema.GroupVersionKind{}] = internal.InformersOptsByGVK{ + Selector: internal.Selector{ + Label: opts.DefaultLabelSelector, + Field: opts.DefaultFieldSelector, + }, + Transform: opts.DefaultTransform, + UnsafeDisableDeepCopy: opts.UnsafeDisableDeepCopy, + } + + return &informerCache{ + scheme: opts.Scheme, + Informers: internal.NewInformers(config, &internal.InformersOpts{ + HTTPClient: opts.HTTPClient, + Scheme: opts.Scheme, + Mapper: opts.Mapper, + ResyncPeriod: *opts.SyncPeriod, + Namespace: opts.Namespaces[0], + ByGVK: byGVK, + }), + }, nil } -func combineTransforms(inherited, options Options, scheme *runtime.Scheme) (TransformByObject, toolscache.TransformFunc, error) { - // Transform functions are combined via chaining. If both inherited and options define a transform - // function, the transform function from inherited will be called first, and the transform function from - // options will be called second. - optionsTransformByGVK, err := convertToByGVK(options.TransformByObject, options.DefaultTransform, options.Scheme) - if err != nil { - return nil, nil, err - } - inheritedTransformByGVK, err := convertToByGVK(inherited.TransformByObject, inherited.DefaultTransform, inherited.Scheme) - if err != nil { - return nil, nil, err +func defaultOpts(config *rest.Config, opts Options) (Options, error) { + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() } - for gvk, inheritedTransform := range inheritedTransformByGVK { - if optionsTransformByGVK == nil { - optionsTransformByGVK = map[schema.GroupVersionKind]toolscache.TransformFunc{} - } - optionsTransformByGVK[gvk] = combineTransform(inheritedTransform, optionsTransformByGVK[gvk]) - } - return convertToByObject(optionsTransformByGVK, scheme) -} + logger := log.WithName("setup") -func combineTransform(inherited, current toolscache.TransformFunc) toolscache.TransformFunc { - if inherited == nil { - return current - } - if current == nil { - return inherited - } - return func(in interface{}) (interface{}, error) { - mid, err := inherited(in) + // Use the rest HTTP client for the provided config if unset + if opts.HTTPClient == nil { + var err error + opts.HTTPClient, err = rest.HTTPClientFor(config) if err != nil { - return nil, err + logger.Error(err, "Failed to get HTTP client") + return opts, fmt.Errorf("could not create HTTP client from config: %w", err) } - return current(mid) } -} -func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Use the default Kubernetes Scheme if unset if opts.Scheme == nil { opts.Scheme = scheme.Scheme @@ -410,108 +261,38 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Construct a new Mapper if unset if opts.Mapper == nil { var err error - opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config, opts.HTTPClient) if err != nil { - log.WithName("setup").Error(err, "Failed to get API Group-Resources") - return opts, fmt.Errorf("could not create RESTMapper from config") + logger.Error(err, "Failed to get API Group-Resources") + return opts, fmt.Errorf("could not create RESTMapper from config: %w", err) } } // Default the resync period to 10 hours if unset - if opts.Resync == nil { - opts.Resync = &defaultResyncTime + if opts.SyncPeriod == nil { + opts.SyncPeriod = &defaultSyncPeriod } return opts, nil } -func convertToByGVK[T any](byObject map[client.Object]T, def T, scheme *runtime.Scheme) (map[schema.GroupVersionKind]T, error) 
{ - byGVK := map[schema.GroupVersionKind]T{} - for object, value := range byObject { +func convertToInformerOptsByGVK(in map[client.Object]ByObject, scheme *runtime.Scheme) (map[schema.GroupVersionKind]internal.InformersOptsByGVK, error) { + out := map[schema.GroupVersionKind]internal.InformersOptsByGVK{} + for object, byObject := range in { gvk, err := apiutil.GVKForObject(object, scheme) if err != nil { return nil, err } - byGVK[gvk] = value - } - byGVK[schema.GroupVersionKind{}] = def - return byGVK, nil -} - -func convertToByObject[T any](byGVK map[schema.GroupVersionKind]T, scheme *runtime.Scheme) (map[client.Object]T, T, error) { - var byObject map[client.Object]T - def := byGVK[schema.GroupVersionKind{}] - for gvk, value := range byGVK { - if gvk == (schema.GroupVersionKind{}) { - continue - } - obj, err := scheme.New(gvk) - if err != nil { - return nil, def, err - } - cObj, ok := obj.(client.Object) - if !ok { - return nil, def, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) - } - cObj.GetObjectKind().SetGroupVersionKind(gvk) - if byObject == nil { - byObject = map[client.Object]T{} - } - byObject[cObj] = value - } - return byObject, def, nil -} - -// DisableDeepCopyByObject associate a client.Object's GVK to disable DeepCopy during get or list from cache. -type DisableDeepCopyByObject map[client.Object]bool - -var _ client.Object = &ObjectAll{} - -// ObjectAll is the argument to represent all objects' types. -type ObjectAll struct { - client.Object -} - -func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObject, scheme *runtime.Scheme) (internal.DisableDeepCopyByGVK, error) { - disableDeepCopyByGVK := internal.DisableDeepCopyByGVK{} - for obj, disable := range disableDeepCopyByObject { - switch obj.(type) { - case ObjectAll, *ObjectAll: - disableDeepCopyByGVK[internal.GroupVersionKindAll] = disable - default: - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return nil, err - } - disableDeepCopyByGVK[gvk] = disable + if _, ok := out[gvk]; ok { + return nil, fmt.Errorf("duplicate cache options for GVK %v, cache.Options.ByObject has multiple types with the same GroupVersionKind", gvk) } - } - return disableDeepCopyByGVK, nil -} - -func convertToDisableDeepCopyByObject(byGVK internal.DisableDeepCopyByGVK, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { - var byObject DisableDeepCopyByObject - for gvk, value := range byGVK { - if byObject == nil { - byObject = DisableDeepCopyByObject{} - } - if gvk == (schema.GroupVersionKind{}) { - byObject[ObjectAll{}] = value - continue - } - obj, err := scheme.New(gvk) - if err != nil { - return nil, err - } - cObj, ok := obj.(client.Object) - if !ok { - return nil, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) + out[gvk] = internal.InformersOptsByGVK{ + Selector: internal.Selector{ + Field: byObject.Field, + Label: byObject.Label, + }, + Transform: byObject.Transform, + UnsafeDisableDeepCopy: byObject.UnsafeDisableDeepCopy, } - - byObject[cObj] = value } - return byObject, nil + return out, nil } - -// TransformByObject associate a client.Object's GVK to a transformer function -// to be applied when storing the object into the cache. 
-type TransformByObject map[client.Object]toolscache.TransformFunc diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 08e4e6df59..771244d52a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -19,10 +19,10 @@ package cache import ( "context" "fmt" - "reflect" "strings" apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -45,19 +45,21 @@ func (*ErrCacheNotStarted) Error() string { return "the cache is not started, can not read objects" } -// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. +// informerCache is a Kubernetes Object cache populated from internal.Informers. +// informerCache wraps internal.Informers. type informerCache struct { - *internal.InformersMap + scheme *runtime.Scheme + *internal.Informers } // Get implements Reader. -func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { - gvk, err := apiutil.GVKForObject(out, ip.Scheme) +func (ic *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(out, ic.scheme) if err != nil { return err } - started, cache, err := ip.InformersMap.Get(ctx, gvk, out) + started, cache, err := ic.Informers.Get(ctx, gvk, out) if err != nil { return err } @@ -69,13 +71,13 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out clie } // List implements Reader. -func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { - gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) +func (ic *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { + gvk, cacheTypeObj, err := ic.objectTypeForListObject(out) if err != nil { return err } - started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj) + started, cache, err := ic.Informers.Get(ctx, *gvk, cacheTypeObj) if err != nil { return err } @@ -90,54 +92,46 @@ func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts . // objectTypeForListObject tries to find the runtime.Object and associated GVK // for a single object corresponding to the passed-in list type. We need them // because they are used as cache map key. -func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { - gvk, err := apiutil.GVKForObject(list, ip.Scheme) +func (ic *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { + gvk, err := apiutil.GVKForObject(list, ic.scheme) if err != nil { return nil, nil, err } - // we need the non-list GVK, so chop off the "List" from the end of the kind - if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) { - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - } + // We need the non-list GVK, so chop off the "List" from the end of the kind. 
+ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - _, isUnstructured := list.(*unstructured.UnstructuredList) - var cacheTypeObj runtime.Object - if isUnstructured { + // Handle unstructured.UnstructuredList. + if _, isUnstructured := list.(runtime.Unstructured); isUnstructured { u := &unstructured.Unstructured{} u.SetGroupVersionKind(gvk) - cacheTypeObj = u - } else { - itemsPtr, err := apimeta.GetItemsPtr(list) - if err != nil { - return nil, nil, err - } - // http://knowyourmeme.com/memes/this-is-fine - elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem() - if elemType.Kind() != reflect.Ptr { - elemType = reflect.PtrTo(elemType) - } - - cacheTypeValue := reflect.Zero(elemType) - var ok bool - cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object) - if !ok { - return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface()) - } + return &gvk, u, nil + } + // Handle metav1.PartialObjectMetadataList. + if _, isPartialObjectMetadata := list.(*metav1.PartialObjectMetadataList); isPartialObjectMetadata { + pom := &metav1.PartialObjectMetadata{} + pom.SetGroupVersionKind(gvk) + return &gvk, pom, nil } + // Any other list type should have a corresponding non-list type registered + // in the scheme. Use that to create a new instance of the non-list type. + cacheTypeObj, err := ic.scheme.New(gvk) + if err != nil { + return nil, nil, err + } return &gvk, cacheTypeObj, nil } // GetInformerForKind returns the informer for the GroupVersionKind. -func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { +func (ic *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { // Map the gvk to an object - obj, err := ip.Scheme.New(gvk) + obj, err := ic.scheme.New(gvk) if err != nil { return nil, err } - _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj) if err != nil { return nil, err } @@ -145,13 +139,13 @@ func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.Grou } // GetInformer returns the informer for the obj. -func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { - gvk, err := apiutil.GVKForObject(obj, ip.Scheme) +func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, ic.scheme) if err != nil { return nil, err } - _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj) if err != nil { return nil, err } @@ -160,7 +154,7 @@ func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (In // NeedLeaderElection implements the LeaderElectionRunnable interface // to indicate that this can be started without requiring the leader lock. -func (ip *informerCache) NeedLeaderElection() bool { +func (ic *informerCache) NeedLeaderElection() bool { return false } @@ -169,8 +163,8 @@ func (ip *informerCache) NeedLeaderElection() bool { // to List. For one-to-one compatibility with "normal" field selectors, only return one value. // The values may be anything. They will automatically be prefixed with the namespace of the // given object, if present. The objects passed are guaranteed to be objects of the correct type. 
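// An illustrative (hypothetical) index on Pods by node name, assuming a cache
// c, a context ctx, and a corev1 import; the extractor returns the value(s)
// to index for each cached object:
//
//	_ = c.IndexField(ctx, &corev1.Pod{}, "spec.nodeName", func(o client.Object) []string {
//		return []string{o.(*corev1.Pod).Spec.NodeName}
//	})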
-func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - informer, err := ip.GetInformer(ctx, obj) +func (ic *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + informer, err := ic.GetInformer(ctx, obj) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index f78b083382..3c8355bbde 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -27,9 +27,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/cache" - "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" ) // CacheReader is a client.Reader. @@ -147,7 +147,7 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli } obj, isObj := item.(runtime.Object) if !isObj { - return fmt.Errorf("cache contained %T, which is not an Object", obj) + return fmt.Errorf("cache contained %T, which is not an Object", item) } meta, err := apimeta.Accessor(obj) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go deleted file mode 100644 index 27f46e3278..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "context" - "time" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" -) - -// InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. -type InformersMap struct { - // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps - // TODO(directxman12): genericize this over different projections now that we have 3 different maps - - structured *specificInformersMap - unstructured *specificInformersMap - metadata *specificInformersMap - - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme -} - -// NewInformersMap creates a new InformersMap that can create informers for -// both structured and unstructured objects. 
-func NewInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string, - selectors SelectorsByGVK, - disableDeepCopy DisableDeepCopyByGVK, - transformers TransformFuncByObject, -) *InformersMap { - return &InformersMap{ - structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - - Scheme: scheme, - } -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the context. -func (m *InformersMap) Start(ctx context.Context) error { - go m.structured.Start(ctx) - go m.unstructured.Start(ctx) - go m.metadata.Start(ctx) - <-ctx.Done() - return nil -} - -// WaitForCacheSync waits until all the caches have been started and synced. -func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool { - syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...) - syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...) - syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...) - - if !m.structured.waitForStarted(ctx) { - return false - } - if !m.unstructured.waitForStarted(ctx) { - return false - } - if !m.metadata.waitForStarted(ctx) { - return false - } - return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...) -} - -// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns -// the Informer from the map. -func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - switch obj.(type) { - case *unstructured.Unstructured: - return m.unstructured.Get(ctx, gvk, obj) - case *unstructured.UnstructuredList: - return m.unstructured.Get(ctx, gvk, obj) - case *metav1.PartialObjectMetadata: - return m.metadata.Get(ctx, gvk, obj) - case *metav1.PartialObjectMetadataList: - return m.metadata.Get(ctx, gvk, obj) - default: - return m.structured.Get(ctx, gvk, obj) - } -} - -// newStructuredInformersMap creates a new InformersMap for structured objects. -func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch) -} - -// newUnstructuredInformersMap creates a new InformersMap for unstructured objects. -func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch) -} - -// newMetadataInformersMap creates a new InformersMap for metadata-only objects. 
-func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go deleted file mode 100644 index 54bd7eec93..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import "k8s.io/apimachinery/pkg/runtime/schema" - -// GroupVersionKindAll is the argument to represent all GroupVersionKind types. -var GroupVersionKindAll = schema.GroupVersionKind{} - -// DisableDeepCopyByGVK associate a GroupVersionKind to disable DeepCopy during get or list from cache. -type DisableDeepCopyByGVK map[schema.GroupVersionKind]bool - -// IsDisabled returns whether a GroupVersionKind is disabled DeepCopy. -func (disableByGVK DisableDeepCopyByGVK) IsDisabled(gvk schema.GroupVersionKind) bool { - if d, ok := disableByGVK[gvk]; ok { - return d - } else if d, ok = disableByGVK[GroupVersionKindAll]; ok { - return d - } - return false -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go new file mode 100644 index 0000000000..09e0111114 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go @@ -0,0 +1,560 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// InformersOpts configures an InformerMap. 
+type InformersOpts struct { + HTTPClient *http.Client + Scheme *runtime.Scheme + Mapper meta.RESTMapper + ResyncPeriod time.Duration + Namespace string + ByGVK map[schema.GroupVersionKind]InformersOptsByGVK +} + +// InformersOptsByGVK configured additional by group version kind (or object) +// in an InformerMap. +type InformersOptsByGVK struct { + Selector Selector + Transform cache.TransformFunc + UnsafeDisableDeepCopy *bool +} + +// NewInformers creates a new InformersMap that can create informers under the hood. +func NewInformers(config *rest.Config, options *InformersOpts) *Informers { + return &Informers{ + config: config, + httpClient: options.HTTPClient, + scheme: options.Scheme, + mapper: options.Mapper, + tracker: tracker{ + Structured: make(map[schema.GroupVersionKind]*Cache), + Unstructured: make(map[schema.GroupVersionKind]*Cache), + Metadata: make(map[schema.GroupVersionKind]*Cache), + }, + codecs: serializer.NewCodecFactory(options.Scheme), + paramCodec: runtime.NewParameterCodec(options.Scheme), + resync: options.ResyncPeriod, + startWait: make(chan struct{}), + namespace: options.Namespace, + byGVK: options.ByGVK, + } +} + +// Cache contains the cached data for an Cache. +type Cache struct { + // Informer is the cached informer + Informer cache.SharedIndexInformer + + // CacheReader wraps Informer and implements the CacheReader interface for a single type + Reader CacheReader +} + +type tracker struct { + Structured map[schema.GroupVersionKind]*Cache + Unstructured map[schema.GroupVersionKind]*Cache + Metadata map[schema.GroupVersionKind]*Cache +} + +// Informers create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. +type Informers struct { + // httpClient is used to create a new REST client + httpClient *http.Client + + // scheme maps runtime.Objects to GroupVersionKinds + scheme *runtime.Scheme + + // config is used to talk to the apiserver + config *rest.Config + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // tracker tracks informers keyed by their type and groupVersionKind + tracker tracker + + // codecs is used to create a new REST client + codecs serializer.CodecFactory + + // paramCodec is used by list and watch + paramCodec runtime.ParameterCodec + + // resync is the base frequency the informers are resynced + // a 10 percent jitter will be added to the resync period between informers + // so that all informers will not send list requests simultaneously. + resync time.Duration + + // mu guards access to the map + mu sync.RWMutex + + // started is true if the informers have been started + started bool + + // startWait is a channel that is closed after the + // informer has been started. 
+ startWait chan struct{} + + // waitGroup is the wait group that is used to wait for all informers to stop + waitGroup sync.WaitGroup + + // stopped is true if the informers have been stopped + stopped bool + + // ctx is the context to stop informers + ctx context.Context + + // namespace is the namespace that all ListWatches are restricted to + // default or empty string means all namespaces + namespace string + + byGVK map[schema.GroupVersionKind]InformersOptsByGVK +} + +func (ip *Informers) getSelector(gvk schema.GroupVersionKind) Selector { + if ip.byGVK == nil { + return Selector{} + } + if res, ok := ip.byGVK[gvk]; ok { + return res.Selector + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { + return res.Selector + } + return Selector{} +} + +func (ip *Informers) getTransform(gvk schema.GroupVersionKind) cache.TransformFunc { + if ip.byGVK == nil { + return nil + } + if res, ok := ip.byGVK[gvk]; ok { + return res.Transform + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { + return res.Transform + } + return nil +} + +func (ip *Informers) getDisableDeepCopy(gvk schema.GroupVersionKind) bool { + if ip.byGVK == nil { + return false + } + if res, ok := ip.byGVK[gvk]; ok && res.UnsafeDisableDeepCopy != nil { + return *res.UnsafeDisableDeepCopy + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok && res.UnsafeDisableDeepCopy != nil { + return *res.UnsafeDisableDeepCopy + } + return false +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// It doesn't return start because it can't return an error, and it's not a runnable directly. +func (ip *Informers) Start(ctx context.Context) error { + func() { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Set the context so it can be passed to informers that are added later + ip.ctx = ctx + + // Start each informer + for _, i := range ip.tracker.Structured { + ip.startInformerLocked(i.Informer) + } + for _, i := range ip.tracker.Unstructured { + ip.startInformerLocked(i.Informer) + } + for _, i := range ip.tracker.Metadata { + ip.startInformerLocked(i.Informer) + } + + // Set started to true so we immediately start any informers added later. + ip.started = true + close(ip.startWait) + }() + <-ctx.Done() // Block until the context is done + ip.mu.Lock() + ip.stopped = true // Set stopped to true so we don't start any new informers + ip.mu.Unlock() + ip.waitGroup.Wait() // Block until all informers have stopped + return nil +} + +func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) { + // Don't start the informer in case we are already waiting for the items in + // the waitGroup to finish, since waitGroups don't support waiting and adding + // at the same time. + if ip.stopped { + return + } + + ip.waitGroup.Add(1) + go func() { + defer ip.waitGroup.Done() + informer.Run(ip.ctx.Done()) + }() +} + +func (ip *Informers) waitForStarted(ctx context.Context) bool { + select { + case <-ip.startWait: + return true + case <-ctx.Done(): + return false + } +} + +// getHasSyncedFuncs returns all the HasSynced functions for the informers in this map. 
+func (ip *Informers) getHasSyncedFuncs() []cache.InformerSynced { + ip.mu.RLock() + defer ip.mu.RUnlock() + + res := make([]cache.InformerSynced, 0, + len(ip.tracker.Structured)+len(ip.tracker.Unstructured)+len(ip.tracker.Metadata), + ) + for _, i := range ip.tracker.Structured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Unstructured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Metadata { + res = append(res, i.Informer.HasSynced) + } + return res +} + +// WaitForCacheSync waits until all the caches have been started and synced. +func (ip *Informers) WaitForCacheSync(ctx context.Context) bool { + if !ip.waitForStarted(ctx) { + return false + } + return cache.WaitForCacheSync(ctx.Done(), ip.getHasSyncedFuncs()...) +} + +func (ip *Informers) get(gvk schema.GroupVersionKind, obj runtime.Object) (res *Cache, started bool, ok bool) { + ip.mu.RLock() + defer ip.mu.RUnlock() + i, ok := ip.informersByType(obj)[gvk] + return i, ip.started, ok +} + +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// the Informer from the map. +func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *Cache, error) { + // Return the informer if it is found + i, started, ok := ip.get(gvk, obj) + if !ok { + var err error + if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { + return started, nil, err + } + } + + if started && !i.Informer.HasSynced() { + // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. + if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { + return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) + } + } + + return started, i, nil +} + +func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersionKind]*Cache { + switch obj.(type) { + case runtime.Unstructured: + return ip.tracker.Unstructured + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + return ip.tracker.Metadata + default: + return ip.tracker.Structured + } +} + +func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*Cache, bool, error) { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Check the cache to see if we already have an Informer. If we do, return the Informer. + // This is for the case where 2 routines tried to get the informer when it wasn't in the map + // so neither returned early, but the first one created it. + if i, ok := ip.informersByType(obj)[gvk]; ok { + return i, ip.started, nil + } + + // Create a NewSharedIndexInformer and add it to the map. 
+ listWatcher, err := ip.makeListWatcher(gvk, obj) + if err != nil { + return nil, false, err + } + sharedIndexInformer := cache.NewSharedIndexInformer(&cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.getSelector(gvk).ApplyToList(&opts) + return listWatcher.ListFunc(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.getSelector(gvk).ApplyToList(&opts) + opts.Watch = true // Watch needs to be set to true separately + return listWatcher.WatchFunc(opts) + }, + }, obj, calculateResyncPeriod(ip.resync), cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + + // Check to see if there is a transformer for this gvk + if err := sharedIndexInformer.SetTransform(ip.getTransform(gvk)); err != nil { + return nil, false, err + } + + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, false, err + } + + // Create the new entry and set it in the map. + i := &Cache{ + Informer: sharedIndexInformer, + Reader: CacheReader{ + indexer: sharedIndexInformer.GetIndexer(), + groupVersionKind: gvk, + scopeName: mapping.Scope.Name(), + disableDeepCopy: ip.getDisableDeepCopy(gvk), + }, + } + ip.informersByType(obj)[gvk] = i + + // Start the informer in case the InformersMap has started, otherwise it will be + // started when the InformersMap starts. + if ip.started { + ip.startInformerLocked(i.Informer) + } + return i, ip.started, nil +} + +func (ip *Informers) makeListWatcher(gvk schema.GroupVersionKind, obj runtime.Object) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // Figure out if the GVK we're dealing with is global, or namespace scoped. + var namespace string + if mapping.Scope.Name() == meta.RESTScopeNameNamespace { + namespace = restrictNamespaceBySelector(ip.namespace, ip.getSelector(gvk)) + } + + switch obj.(type) { + // + // Unstructured + // + case runtime.Unstructured: + // If the rest configuration has a negotiated serializer passed in, + // we should remove it and use the one that the dynamic client sets for us. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + dynamicClient, err := dynamic.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := dynamicClient.Resource(mapping.Resource) + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + if namespace != "" { + return resources.Namespace(namespace).List(ip.ctx, opts) + } + return resources.List(ip.ctx, opts) + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + if namespace != "" { + return resources.Namespace(namespace).Watch(ip.ctx, opts) + } + return resources.Watch(ip.ctx, opts) + }, + }, nil + // + // Metadata + // + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + // Always clear the negotiated serializer and use the one + // set from the metadata client. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + + // Grab the metadata metadataClient. 
+ metadataClient, err := metadata.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := metadataClient.Resource(mapping.Resource) + + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + var ( + list *metav1.PartialObjectMetadataList + err error + ) + if namespace != "" { + list, err = resources.Namespace(namespace).List(ip.ctx, opts) + } else { + list, err = resources.List(ip.ctx, opts) + } + if list != nil { + for i := range list.Items { + list.Items[i].SetGroupVersionKind(gvk) + } + } + return list, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watcher watch.Interface, err error) { + if namespace != "" { + watcher, err = resources.Namespace(namespace).Watch(ip.ctx, opts) + } else { + watcher, err = resources.Watch(ip.ctx, opts) + } + if err != nil { + return nil, err + } + return newGVKFixupWatcher(gvk, watcher), nil + }, + }, nil + // + // Structured. + // + default: + client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs, ip.httpClient) + if err != nil { + return nil, err + } + listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") + listObj, err := ip.scheme.New(listGVK) + if err != nil { + return nil, err + } + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + + // Create the resulting object, and execute the request. + res := listObj.DeepCopyObject() + if err := req.Do(ip.ctx).Into(res); err != nil { + return nil, err + } + return res, nil + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + // Call the watch. + return req.Watch(ip.ctx) + }, + }, nil + } +} + +// newGVKFixupWatcher adds a wrapper that preserves the GVK information when +// events come in. +// +// This works around a bug where GVK information is not passed into mapping +// functions when using the OnlyMetadata option in the builder. +// This issue is most likely caused by kubernetes/kubernetes#80609. +// See kubernetes-sigs/controller-runtime#1484. +// +// This was originally implemented as a cache.ResourceEventHandler wrapper but +// that contained a data race which was resolved by setting the GVK in a watch +// wrapper, before the objects are written to the cache. +// See kubernetes-sigs/controller-runtime#1650. +// +// The original watch wrapper was found to be incompatible with +// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a +// watch.Filter which is compatible. +// See kubernetes-sigs/controller-runtime#1789. +func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { + return watch.Filter( + watcher, + func(in watch.Event) (watch.Event, bool) { + in.Object.GetObjectKind().SetGroupVersionKind(gvk) + return in, true + }, + ) +} + +// calculateResyncPeriod returns a duration based on the desired input +// this is so that multiple controllers don't get into lock-step and all +// hammer the apiserver with list requests simultaneously. 
+func calculateResyncPeriod(resync time.Duration) time.Duration { + // the factor will fall into [0.9, 1.1) + factor := rand.Float64()/5.0 + 0.9 //nolint:gosec + return time.Duration(float64(resync.Nanoseconds()) * factor) +} + +// restrictNamespaceBySelector returns either a global restriction for all ListWatches +// if not default/empty, or the namespace that a ListWatch for the specific resource +// is restricted to, based on a specified field selector for metadata.namespace field. +func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { + if namespaceOpt != "" { + // namespace is already restricted + return namespaceOpt + } + fieldSelector := s.Field + if fieldSelector == nil || fieldSelector.Empty() { + return "" + } + // check whether a selector includes the namespace field + value, found := fieldSelector.RequiresExactMatch("metadata.namespace") + if found { + return value + } + return "" +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go deleted file mode 100644 index 1524d2316f..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ /dev/null @@ -1,480 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "context" - "fmt" - "math/rand" - "sync" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/metadata" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// clientListWatcherFunc knows how to create a ListWatcher. -type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) - -// newSpecificInformersMap returns a new specificInformersMap (like -// the generical InformersMap, except that it doesn't implement WaitForCacheSync). 
-func newSpecificInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string, - selectors SelectorsByGVK, - disableDeepCopy DisableDeepCopyByGVK, - transformers TransformFuncByObject, - createListWatcher createListWatcherFunc, -) *specificInformersMap { - ip := &specificInformersMap{ - config: config, - Scheme: scheme, - mapper: mapper, - informersByGVK: make(map[schema.GroupVersionKind]*MapEntry), - codecs: serializer.NewCodecFactory(scheme), - paramCodec: runtime.NewParameterCodec(scheme), - resync: resync, - startWait: make(chan struct{}), - createListWatcher: createListWatcher, - namespace: namespace, - selectors: selectors.forGVK, - disableDeepCopy: disableDeepCopy, - transformers: transformers, - } - return ip -} - -// MapEntry contains the cached data for an Informer. -type MapEntry struct { - // Informer is the cached informer - Informer cache.SharedIndexInformer - - // CacheReader wraps Informer and implements the CacheReader interface for a single type - Reader CacheReader -} - -// specificInformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. -type specificInformersMap struct { - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme - - // config is used to talk to the apiserver - config *rest.Config - - // mapper maps GroupVersionKinds to Resources - mapper meta.RESTMapper - - // informersByGVK is the cache of informers keyed by groupVersionKind - informersByGVK map[schema.GroupVersionKind]*MapEntry - - // codecs is used to create a new REST client - codecs serializer.CodecFactory - - // paramCodec is used by list and watch - paramCodec runtime.ParameterCodec - - // stop is the stop channel to stop informers - stop <-chan struct{} - - // resync is the base frequency the informers are resynced - // a 10 percent jitter will be added to the resync period between informers - // so that all informers will not send list requests simultaneously. - resync time.Duration - - // mu guards access to the map - mu sync.RWMutex - - // start is true if the informers have been started - started bool - - // startWait is a channel that is closed after the - // informer has been started. - startWait chan struct{} - - // createClient knows how to create a client and a list object, - // and allows for abstracting over the particulars of structured vs - // unstructured objects. - createListWatcher createListWatcherFunc - - // namespace is the namespace that all ListWatches are restricted to - // default or empty string means all namespaces - namespace string - - // selectors are the label or field selectors that will be added to the - // ListWatch ListOptions. - selectors func(gvk schema.GroupVersionKind) Selector - - // disableDeepCopy indicates not to deep copy objects during get or list objects. - disableDeepCopy DisableDeepCopyByGVK - - // transform funcs are applied to objects before they are committed to the cache - transformers TransformFuncByObject -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the context. -// It doesn't return start because it can't return an error, and it's not a runnable directly. 
-func (ip *specificInformersMap) Start(ctx context.Context) { - func() { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Set the stop channel so it can be passed to informers that are added later - ip.stop = ctx.Done() - - // Start each informer - for _, informer := range ip.informersByGVK { - go informer.Informer.Run(ctx.Done()) - } - - // Set started to true so we immediately start any informers added later. - ip.started = true - close(ip.startWait) - }() - <-ctx.Done() -} - -func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool { - select { - case <-ip.startWait: - return true - case <-ctx.Done(): - return false - } -} - -// HasSyncedFuncs returns all the HasSynced functions for the informers in this map. -func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { - ip.mu.RLock() - defer ip.mu.RUnlock() - syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK)) - for _, informer := range ip.informersByGVK { - syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced) - } - return syncedFuncs -} - -// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns -// the Informer from the map. -func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - // Return the informer if it is found - i, started, ok := func() (*MapEntry, bool, bool) { - ip.mu.RLock() - defer ip.mu.RUnlock() - i, ok := ip.informersByGVK[gvk] - return i, ip.started, ok - }() - - if !ok { - var err error - if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { - return started, nil, err - } - } - - if started && !i.Informer.HasSynced() { - // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. - if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { - return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) - } - } - - return started, i, nil -} - -func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Check the cache to see if we already have an Informer. If we do, return the Informer. - // This is for the case where 2 routines tried to get the informer when it wasn't in the map - // so neither returned early, but the first one created it. - if i, ok := ip.informersByGVK[gvk]; ok { - return i, ip.started, nil - } - - // Create a NewSharedIndexInformer and add it to the map. - var lw *cache.ListWatch - lw, err := ip.createListWatcher(gvk, ip) - if err != nil { - return nil, false, err - } - ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{ - cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, - }) - - // Check to see if there is a transformer for this gvk - if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil { - return nil, false, err - } - - rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, false, err - } - - i := &MapEntry{ - Informer: ni, - Reader: CacheReader{ - indexer: ni.GetIndexer(), - groupVersionKind: gvk, - scopeName: rm.Scope.Name(), - disableDeepCopy: ip.disableDeepCopy.IsDisabled(gvk), - }, - } - ip.informersByGVK[gvk] = i - - // Start the Informer if need by - // TODO(seans): write thorough tests and document what happens here - can you add indexers? - // can you add eventhandlers? 
- if ip.started { - go i.Informer.Run(ip.stop) - } - return i, ip.started, nil -} - -// newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer. -func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs) - if err != nil { - return nil, err - } - listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") - listObj, err := ip.Scheme.New(listGVK) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. - ctx := context.TODO() - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - res := listObj.DeepCopyObject() - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) - return res, err - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) - }, - }, nil -} - -func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - // If the rest configuration has a negotiated serializer passed in, - // we should remove it and use the one that the dynamic client sets for us. - cfg := rest.CopyConfig(ip.config) - cfg.NegotiatedSerializer = nil - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. 
- ctx := context.TODO() - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) - } - return dynamicClient.Resource(mapping.Resource).List(ctx, opts) - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) - } - return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts) - }, - }, nil -} - -func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - // Always clear the negotiated serializer and use the one - // set from the metadata client. - cfg := rest.CopyConfig(ip.config) - cfg.NegotiatedSerializer = nil - - // grab the metadata client - client, err := metadata.NewForConfig(cfg) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. - ctx := context.TODO() - - // create the relevant listwatch - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - - var ( - list *metav1.PartialObjectMetadataList - err error - ) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - list, err = client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) - } else { - list, err = client.Resource(mapping.Resource).List(ctx, opts) - } - if list != nil { - for i := range list.Items { - list.Items[i].SetGroupVersionKind(gvk) - } - } - return list, err - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - - var ( - watcher watch.Interface - err error - ) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - watcher, err = client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) - } else { - watcher, err = client.Resource(mapping.Resource).Watch(ctx, opts) - } - if watcher != nil { - watcher = newGVKFixupWatcher(gvk, watcher) - } - return watcher, err - }, - }, nil -} - -// newGVKFixupWatcher adds a wrapper that preserves the GVK information when -// events come in. -// -// This works around a bug where GVK information is not passed into mapping -// functions when using the OnlyMetadata option in the builder. -// This issue is most likely caused by kubernetes/kubernetes#80609. 
-// See kubernetes-sigs/controller-runtime#1484. -// -// This was originally implemented as a cache.ResourceEventHandler wrapper but -// that contained a data race which was resolved by setting the GVK in a watch -// wrapper, before the objects are written to the cache. -// See kubernetes-sigs/controller-runtime#1650. -// -// The original watch wrapper was found to be incompatible with -// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a -// watch.Filter which is compatible. -// See kubernetes-sigs/controller-runtime#1789. -func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { - return watch.Filter( - watcher, - func(in watch.Event) (watch.Event, bool) { - in.Object.GetObjectKind().SetGroupVersionKind(gvk) - return in, true - }, - ) -} - -// resyncPeriod returns a function which generates a duration each time it is -// invoked; this is so that multiple controllers don't get into lock-step and all -// hammer the apiserver with list requests simultaneously. -func resyncPeriod(resync time.Duration) func() time.Duration { - return func() time.Duration { - // the factor will fall into [0.9, 1.1) - factor := rand.Float64()/5.0 + 0.9 //nolint:gosec - return time.Duration(float64(resync.Nanoseconds()) * factor) - } -} - -// restrictNamespaceBySelector returns either a global restriction for all ListWatches -// if not default/empty, or the namespace that a ListWatch for the specific resource -// is restricted to, based on a specified field selector for metadata.namespace field. -func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { - if namespaceOpt != "" { - // namespace is already restricted - return namespaceOpt - } - fieldSelector := s.Field - if fieldSelector == nil || fieldSelector.Empty() { - return "" - } - // check whether a selector includes the namespace field - value, found := fieldSelector.RequiresExactMatch("metadata.namespace") - if found { - return value - } - return "" -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go index 4eff32fb35..c674379b99 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go @@ -20,23 +20,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// SelectorsByGVK associate a GroupVersionKind to a field/label selector. -type SelectorsByGVK map[schema.GroupVersionKind]Selector - -func (s SelectorsByGVK) forGVK(gvk schema.GroupVersionKind) Selector { - if specific, found := s[gvk]; found { - return specific - } - if defaultSelector, found := s[schema.GroupVersionKind{}]; found { - return defaultSelector - } - - return Selector{} -} - // Selector specify the label/field selector to fill in ListOptions. 
type Selector struct { Label labels.Selector diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go index f69e02262a..0725f550c5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go @@ -8,9 +8,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// TransformFuncByObject provides access to the correct transform function for +// TransformFuncByGVK provides access to the correct transform function for // any given GVK. -type TransformFuncByObject interface { +type TransformFuncByGVK interface { Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error Get(schema.GroupVersionKind) cache.TransformFunc SetDefault(transformer cache.TransformFunc) @@ -21,9 +21,9 @@ type transformFuncByGVK struct { transformers map[schema.GroupVersionKind]cache.TransformFunc } -// TransformFuncByObjectFromMap creates a TransformFuncByObject from a map that +// TransformFuncByGVKFromMap creates a TransformFuncByGVK from a map that // maps GVKs to TransformFuncs. -func TransformFuncByObjectFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByObject { +func TransformFuncByGVKFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByGVK { byGVK := &transformFuncByGVK{} if defaultFunc, hasDefault := in[schema.GroupVersionKind{}]; hasDefault { byGVK.defaultTransform = defaultFunc diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index fccb364710..ac97beae94 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -28,12 +28,9 @@ import ( "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// NewCacheFunc - Function for creating a new cache from the options and a rest config. -type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) - // a new global namespaced cache to handle cluster scoped resources. const globalCache = "_cluster-scope" @@ -43,31 +40,43 @@ const globalCache = "_cluster-scope" // a global cache for cluster scoped resource. Note that this is not intended // to be used for excluding namespaces, this is better done via a Predicate. Also note that // you may face performance issues when using this with a high number of namespaces. +// +// Deprecated: Use cache.Options.Namespaces instead. func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { return func(config *rest.Config, opts Options) (Cache, error) { - opts, err := defaultOpts(config, opts) - if err != nil { - return nil, err - } + opts.Namespaces = namespaces + return newMultiNamespaceCache(config, opts) + } +} - caches := map[string]Cache{} +func newMultiNamespaceCache(config *rest.Config, opts Options) (Cache, error) { + if len(opts.Namespaces) < 2 { + return nil, fmt.Errorf("must specify more than one namespace to use multi-namespace cache") + } + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } - // create a cache for cluster scoped resources - gCache, err := New(config, opts) + // Create every namespace cache. 
+ caches := map[string]Cache{} + for _, ns := range opts.Namespaces { + opts.Namespaces = []string{ns} + c, err := New(config, opts) if err != nil { - return nil, fmt.Errorf("error creating global cache: %w", err) + return nil, err } + caches[ns] = c + } - for _, ns := range namespaces { - opts.Namespace = ns - c, err := New(config, opts) - if err != nil { - return nil, err - } - caches[ns] = c - } - return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil + // Create a cache for cluster scoped resources. + opts.Namespaces = []string{} + gCache, err := New(config, opts) + if err != nil { + return nil, fmt.Errorf("error creating global cache: %w", err) } + + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil } // multiNamespaceCache knows how to handle multiple namespaced caches @@ -89,7 +98,7 @@ func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object // If the object is clusterscoped, get the informer from clusterCache, // if not use the namespaced caches. - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return nil, err } @@ -119,7 +128,7 @@ func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema // If the object is clusterscoped, get the informer from clusterCache, // if not use the namespaced caches. - isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsGVKNamespaced(gvk, c.RESTMapper) if err != nil { return nil, err } @@ -183,9 +192,9 @@ func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { } func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { - return nil //nolint:nilerr + return err } if !isNamespaced { @@ -201,7 +210,7 @@ func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, } func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return err } @@ -223,7 +232,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) - isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(list, c.Scheme, c.RESTMapper) if err != nil { return err } @@ -293,42 +302,63 @@ type multiNamespaceInformer struct { namespaceToInformer map[string]Informer } +type handlerRegistration struct { + handles map[string]toolscache.ResourceEventHandlerRegistration +} + +type syncer interface { + HasSynced() bool +} + +// HasSynced asserts that the handler has been called for the full initial state of the informer. +// This uses syncer to be compatible between client-go 1.27+ and older versions when the interface changed. 
+func (h handlerRegistration) HasSynced() bool { + for _, reg := range h.handles { + if s, ok := reg.(syncer); ok { + if !s.HasSynced() { + return false + } + } + } + return true +} + var _ Informer = &multiNamespaceInformer{} // AddEventHandler adds the handler to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) { - handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandler(handler) if err != nil { return nil, err } - handles[ns] = registration + handles.handles[ns] = registration } return handles, nil } // AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) { - handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) if err != nil { return nil, err } - handles[ns] = registration + handles.handles[ns] = registration } return handles, nil } // RemoveEventHandler removes a formerly added event handler given by its registration handle. func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHandlerRegistration) error { - handles, ok := h.(map[string]toolscache.ResourceEventHandlerRegistration) + handles, ok := h.(handlerRegistration) if !ok { return fmt.Errorf("it is not the registration returned by multiNamespaceInformer") } for ns, informer := range i.namespaceToInformer { - registration, ok := handles[ns] + registration, ok := handles.handles[ns] if !ok { continue } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go index 1030013db3..2b9b60d8d7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go @@ -19,9 +19,14 @@ package certwatcher import ( "context" "crypto/tls" + "fmt" "sync" + "time" "github.com/fsnotify/fsnotify" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) @@ -39,6 +44,9 @@ type CertWatcher struct { certPath string keyPath string + + // callback is a function to be invoked when the certificate changes. + callback func(tls.Certificate) } // New returns a new CertWatcher watching the given certificate and key. @@ -63,6 +71,17 @@ func New(certPath, keyPath string) (*CertWatcher, error) { return cw, nil } +// RegisterCallback registers a callback to be invoked when the certificate changes. 
+func (cw *CertWatcher) RegisterCallback(callback func(tls.Certificate)) { + cw.Lock() + defer cw.Unlock() + // If the current certificate is not nil, invoke the callback immediately. + if cw.currentCert != nil { + callback(*cw.currentCert) + } + cw.callback = callback +} + // GetCertificate fetches the currently loaded certificate, which may be nil. func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { cw.RLock() @@ -72,11 +91,22 @@ func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, // Start starts the watch on the certificate and key files. func (cw *CertWatcher) Start(ctx context.Context) error { - files := []string{cw.certPath, cw.keyPath} - - for _, f := range files { - if err := cw.watcher.Add(f); err != nil { - return err + files := sets.New(cw.certPath, cw.keyPath) + + { + var watchErr error + if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, func(ctx context.Context) (done bool, err error) { + for _, f := range files.UnsortedList() { + if err := cw.watcher.Add(f); err != nil { + watchErr = err + return false, nil //nolint:nilerr // We want to keep trying. + } + // We've added the watch, remove it from the set. + files.Delete(f) + } + return true, nil + }); err != nil { + return fmt.Errorf("failed to add watches: %w", kerrors.NewAggregate([]error{err, watchErr})) } } @@ -130,6 +160,14 @@ func (cw *CertWatcher) ReadCertificate() error { log.Info("Updated current TLS certificate") + // If a callback is registered, invoke it with the new certificate. + cw.RLock() + defer cw.RUnlock() + if cw.callback != nil { + go func() { + cw.callback(cert) + }() + } return nil } @@ -154,13 +192,13 @@ func (cw *CertWatcher) handleEvent(event fsnotify.Event) { } func isWrite(event fsnotify.Event) bool { - return event.Op&fsnotify.Write == fsnotify.Write + return event.Op.Has(fsnotify.Write) } func isCreate(event fsnotify.Event) bool { - return event.Op&fsnotify.Create == fsnotify.Create + return event.Op.Has(fsnotify.Create) } func isRemove(event fsnotify.Event) bool { - return event.Op&fsnotify.Remove == fsnotify.Remove + return event.Op.Has(fsnotify.Remove) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 8e2ac48fa2..6a1bfb546e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -20,7 +20,9 @@ limitations under the License. package apiutil import ( + "errors" "fmt" + "net/http" "reflect" "sync" @@ -30,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" @@ -59,9 +62,13 @@ func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { // NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery // information fetched by a new client with the given config. 
-func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { +func NewDiscoveryRESTMapper(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + // Get a mapper - dc, err := discovery.NewDiscoveryClientForConfig(c) + dc, err := discovery.NewDiscoveryClientForConfigAndClient(c, httpClient) if err != nil { return nil, err } @@ -72,6 +79,36 @@ func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { return restmapper.NewDiscoveryRESTMapper(gr), nil } +// IsObjectNamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper meta.RESTMapper) (bool, error) { + gvk, err := GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsGVKNamespaced(gvk, restmapper) +} + +// IsGVKNamespaced returns true if the object having the provided +// GVK is namespace scoped. +func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != meta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} + // GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { // TODO(directxman12): do we want to generalize this to arbitrary container types? @@ -142,21 +179,11 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated // with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from // baseConfig, if set, otherwise a default serializer will be set. -func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { - return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) -} - -// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory -// in order to avoid clearing the GVK from the decoded object. -// -// See https://github.com/kubernetes/kubernetes/issues/80609. -type serializerWithDecodedGVK struct { - serializer.WithoutConversionCodecFactory -} - -// DecoderToVersion returns an decoder that does not do conversion. 
-func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return serializer +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, httpClient *http.Client) (rest.Interface, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + return rest.RESTClientForConfigAndClient(createRestConfig(gvk, isUnstructured, baseConfig, codecs), httpClient) } // createRestConfig copies the base config and updates needed fields for a new rest config. @@ -183,9 +210,8 @@ func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConf } if isUnstructured { - // If the object is unstructured, we need to preserve the GVK information. - // Use our own custom serializer. - cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + // If the object is unstructured, we use the client-go dynamic serializer. + cfg = dynamic.ConfigFor(cfg) } else { cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go deleted file mode 100644 index 6b9dcf68ad..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go +++ /dev/null @@ -1,301 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apiutil - -import ( - "sync" - "sync/atomic" - - "golang.org/x/time/rate" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" -) - -// dynamicRESTMapper is a RESTMapper that dynamically discovers resource -// types at runtime. -type dynamicRESTMapper struct { - mu sync.RWMutex // protects the following fields - staticMapper meta.RESTMapper - limiter *rate.Limiter - newMapper func() (meta.RESTMapper, error) - - lazy bool - // Used for lazy init. - inited uint32 - initMtx sync.Mutex - - useLazyRestmapper bool -} - -// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. -type DynamicRESTMapperOption func(*dynamicRESTMapper) error - -// WithLimiter sets the RESTMapper's underlying limiter to lim. -func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.limiter = lim - return nil - } -} - -// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings -// until an API call is made. -var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { - drm.lazy = true - return nil -} - -// WithExperimentalLazyMapper enables experimental more advanced Lazy Restmapping mechanism. 
-var WithExperimentalLazyMapper DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { - drm.useLazyRestmapper = true - return nil -} - -// WithCustomMapper supports setting a custom RESTMapper refresher instead of -// the default method, which uses a discovery client. -// -// This exists mainly for testing, but can be useful if you need tighter control -// over how discovery is performed, which discovery endpoints are queried, etc. -func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.newMapper = newMapper - return nil - } -} - -// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic -// RESTMapper dynamically discovers resource types at runtime. opts -// configure the RESTMapper. -func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { - client, err := discovery.NewDiscoveryClientForConfig(cfg) - if err != nil { - return nil, err - } - drm := &dynamicRESTMapper{ - limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), - newMapper: func() (meta.RESTMapper, error) { - groupResources, err := restmapper.GetAPIGroupResources(client) - if err != nil { - return nil, err - } - return restmapper.NewDiscoveryRESTMapper(groupResources), nil - }, - } - for _, opt := range opts { - if err = opt(drm); err != nil { - return nil, err - } - } - if drm.useLazyRestmapper { - return newLazyRESTMapperWithClient(client) - } - if !drm.lazy { - if err := drm.setStaticMapper(); err != nil { - return nil, err - } - } - return drm, nil -} - -var ( - // defaultRefilRate is the default rate at which potential calls are - // added back to the "bucket" of allowed calls. - defaultRefillRate = 5 - // defaultLimitSize is the default starting/max number of potential calls - // per second. Once a call is used, it's added back to the bucket at a rate - // of defaultRefillRate per second. - defaultLimitSize = 5 -) - -// setStaticMapper sets drm's staticMapper by querying its client, regardless -// of reload backoff. -func (drm *dynamicRESTMapper) setStaticMapper() error { - newMapper, err := drm.newMapper() - if err != nil { - return err - } - drm.staticMapper = newMapper - return nil -} - -// init initializes drm only once if drm is lazy. -func (drm *dynamicRESTMapper) init() (err error) { - // skip init if drm is not lazy or has initialized - if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 { - return nil - } - - drm.initMtx.Lock() - defer drm.initMtx.Unlock() - if drm.inited == 0 { - if err = drm.setStaticMapper(); err == nil { - atomic.StoreUint32(&drm.inited, 1) - } - } - return err -} - -// checkAndReload attempts to call the given callback, which is assumed to be dependent -// on the data in the restmapper. -// -// If the callback returns an error matching meta.IsNoMatchErr, it will attempt to reload -// the RESTMapper's data and re-call the callback once that's occurred. -// If the callback returns any other error, the function will return immediately regardless. -// -// It will take care of ensuring that reloads are rate-limited and that extraneous calls -// aren't made. If a reload would exceed the limiters rate, it returns the error return by -// the callback. -// It's thread-safe, and worries about thread-safety for the callback (so the callback does -// not need to attempt to lock the restmapper). 
-func (drm *dynamicRESTMapper) checkAndReload(checkNeedsReload func() error) error { - // first, check the common path -- data is fresh enough - // (use an IIFE for the lock's defer) - err := func() error { - drm.mu.RLock() - defer drm.mu.RUnlock() - - return checkNeedsReload() - }() - - needsReload := meta.IsNoMatchError(err) - if !needsReload { - return err - } - - // if the data wasn't fresh, we'll need to try and update it, so grab the lock... - drm.mu.Lock() - defer drm.mu.Unlock() - - // ... and double-check that we didn't reload in the meantime - err = checkNeedsReload() - needsReload = meta.IsNoMatchError(err) - if !needsReload { - return err - } - - // we're still stale, so grab a rate-limit token if we can... - if !drm.limiter.Allow() { - // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter) - // so that client's can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError - return err - } - - // ...reload... - if err := drm.setStaticMapper(); err != nil { - return err - } - - // ...and return the results of the closure regardless - return checkNeedsReload() -} - -// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. - -func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionKind{}, err - } - var gvk schema.GroupVersionKind - err := drm.checkAndReload(func() error { - var err error - gvk, err = drm.staticMapper.KindFor(resource) - return err - }) - return gvk, err -} - -func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvks []schema.GroupVersionKind - err := drm.checkAndReload(func() error { - var err error - gvks, err = drm.staticMapper.KindsFor(resource) - return err - }) - return gvks, err -} - -func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionResource{}, err - } - - var gvr schema.GroupVersionResource - err := drm.checkAndReload(func() error { - var err error - gvr, err = drm.staticMapper.ResourceFor(input) - return err - }) - return gvr, err -} - -func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvrs []schema.GroupVersionResource - err := drm.checkAndReload(func() error { - var err error - gvrs, err = drm.staticMapper.ResourcesFor(input) - return err - }) - return gvrs, err -} - -func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mapping *meta.RESTMapping - err := drm.checkAndReload(func() error { - var err error - mapping, err = drm.staticMapper.RESTMapping(gk, versions...) - return err - }) - return mapping, err -} - -func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mappings []*meta.RESTMapping - err := drm.checkAndReload(func() error { - var err error - mappings, err = drm.staticMapper.RESTMappings(gk, versions...) 
- return err - }) - return mappings, err -} - -func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { - if err := drm.init(); err != nil { - return "", err - } - var singular string - err := drm.checkAndReload(func() error { - var err error - singular, err = drm.staticMapper.ResourceSingularizer(resource) - return err - }) - return singular, err -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go similarity index 57% rename from vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index e9b1e710c2..e0ff72dc13 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -18,137 +18,151 @@ package apiutil import ( "fmt" + "net/http" "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" ) -// lazyRESTMapper is a RESTMapper that will lazily query the provided +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. +func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + + client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) + if err != nil { + return nil, err + } + return &mapper{ + mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), + client: client, + knownGroups: map[string]*restmapper.APIGroupResources{}, + apiGroups: map[string]*metav1.APIGroup{}, + }, nil +} + +// mapper is a RESTMapper that will lazily query the provided // client for discovery information to do REST mappings. -type lazyRESTMapper struct { +type mapper struct { mapper meta.RESTMapper client *discovery.DiscoveryClient knownGroups map[string]*restmapper.APIGroupResources - apiGroups []metav1.APIGroup + apiGroups map[string]*metav1.APIGroup // mutex to provide thread-safe mapper reloading. - mu sync.Mutex -} - -// newLazyRESTMapperWithClient initializes a LazyRESTMapper with a custom discovery client. -func newLazyRESTMapperWithClient(discoveryClient *discovery.DiscoveryClient) (meta.RESTMapper, error) { - return &lazyRESTMapper{ - mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), - client: discoveryClient, - knownGroups: map[string]*restmapper.APIGroupResources{}, - apiGroups: []metav1.APIGroup{}, - }, nil + mu sync.RWMutex } // KindFor implements Mapper.KindFor. 
-func (m *lazyRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - res, err := m.mapper.KindFor(resource) +func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + res, err := m.getMapper().KindFor(resource) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return schema.GroupVersionKind{}, err } - - res, err = m.mapper.KindFor(resource) + res, err = m.getMapper().KindFor(resource) } return res, err } // KindsFor implements Mapper.KindsFor. -func (m *lazyRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - res, err := m.mapper.KindsFor(resource) +func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + res, err := m.getMapper().KindsFor(resource) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return nil, err } - - res, err = m.mapper.KindsFor(resource) + res, err = m.getMapper().KindsFor(resource) } return res, err } // ResourceFor implements Mapper.ResourceFor. -func (m *lazyRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - res, err := m.mapper.ResourceFor(input) +func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourceFor(input) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return schema.GroupVersionResource{}, err } - - res, err = m.mapper.ResourceFor(input) + res, err = m.getMapper().ResourceFor(input) } return res, err } // ResourcesFor implements Mapper.ResourcesFor. -func (m *lazyRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - res, err := m.mapper.ResourcesFor(input) +func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourcesFor(input) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return nil, err } - - res, err = m.mapper.ResourcesFor(input) + res, err = m.getMapper().ResourcesFor(input) } return res, err } // RESTMapping implements Mapper.RESTMapping. -func (m *lazyRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - res, err := m.mapper.RESTMapping(gk, versions...) +func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMapping(gk, versions...) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return res, err + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err } - - res, err = m.mapper.RESTMapping(gk, versions...) + res, err = m.getMapper().RESTMapping(gk, versions...) } return res, err } // RESTMappings implements Mapper.RESTMappings. 
-func (m *lazyRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - res, err := m.mapper.RESTMappings(gk, versions...) +func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMappings(gk, versions...) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return res, err + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err } - - res, err = m.mapper.RESTMappings(gk, versions...) + res, err = m.getMapper().RESTMappings(gk, versions...) } return res, err } // ResourceSingularizer implements Mapper.ResourceSingularizer. -func (m *lazyRESTMapper) ResourceSingularizer(resource string) (string, error) { - return m.mapper.ResourceSingularizer(resource) +func (m *mapper) ResourceSingularizer(resource string) (string, error) { + return m.getMapper().ResourceSingularizer(resource) +} + +func (m *mapper) getMapper() meta.RESTMapper { + m.mu.RLock() + defer m.mu.RUnlock() + return m.mapper } // addKnownGroupAndReload reloads the mapper with updated information about missing API group. // versions can be specified for partial updates, for instance for v1beta1 version only. -func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...string) error { - m.mu.Lock() - defer m.mu.Unlock() +func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { + // versions will here be [""] if the forwarded Version value of + // GroupVersionResource (in calling method) was not specified. + if len(versions) == 1 && versions[0] == "" { + versions = nil + } // If no specific versions are set by user, we will scan all available ones for the API group. // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls // this data will be taken from cache. if len(versions) == 0 { - apiGroup, err := m.findAPIGroupByNameLocked(groupName) + apiGroup, err := m.findAPIGroupByName(groupName) if err != nil { return err } @@ -157,6 +171,9 @@ func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...st } } + m.mu.Lock() + defer m.mu.Unlock() + // Create or fetch group resources from cache. groupResources := &restmapper.APIGroupResources{ Group: metav1.APIGroup{Name: groupName}, @@ -205,43 +222,53 @@ func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...st } m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) - return nil } // findAPIGroupByNameLocked returns API group by its name. -func (m *lazyRESTMapper) findAPIGroupByNameLocked(groupName string) (metav1.APIGroup, error) { +func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) { // Looking in the cache first. - for _, apiGroup := range m.apiGroups { - if groupName == apiGroup.Name { - return apiGroup, nil + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil } } // Update the cache if nothing was found. 
apiGroups, err := m.client.ServerGroups() if err != nil { - return metav1.APIGroup{}, fmt.Errorf("failed to get server groups: %w", err) + return nil, fmt.Errorf("failed to get server groups: %w", err) } if len(apiGroups.Groups) == 0 { - return metav1.APIGroup{}, fmt.Errorf("received an empty API groups list") + return nil, fmt.Errorf("received an empty API groups list") } - m.apiGroups = apiGroups.Groups + m.mu.Lock() + for i := range apiGroups.Groups { + group := &apiGroups.Groups[i] + m.apiGroups[group.Name] = group + } + m.mu.Unlock() // Looking in the cache again. - for _, apiGroup := range m.apiGroups { - if groupName == apiGroup.Name { - return apiGroup, nil + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil } } // If there is still nothing, return an error. - return metav1.APIGroup{}, fmt.Errorf("failed to find API group %s", groupName) + return nil, fmt.Errorf("failed to find API group %q", groupName) } // fetchGroupVersionResources fetches the resources for the specified group and its versions. -func (m *lazyRESTMapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { +func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 7d1ed5c968..0d8b9fbe18 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -20,11 +20,11 @@ import ( "context" "errors" "fmt" + "net/http" "strings" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -36,6 +36,28 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ) +// Options are creation options for a Client. +type Options struct { + // HTTPClient is the HTTP client to use for requests. + HTTPClient *http.Client + + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Cache, if provided, is used to read objects from the cache. + Cache *CacheOptions + + // WarningHandler is used to configure the warning handler responsible for + // surfacing and handling warnings messages sent by the API server. + WarningHandler WarningHandlerOptions + + // DryRun instructs the client to only perform dry run requests. + DryRun *bool +} + // WarningHandlerOptions are options for configuring a // warning handler for the client which is responsible // for surfacing API Server warnings. @@ -50,19 +72,21 @@ type WarningHandlerOptions struct { AllowDuplicateLogs bool } -// Options are creation options for a Client. 
-type Options struct { - // Scheme, if provided, will be used to map go structs to GroupVersionKinds - Scheme *runtime.Scheme - - // Mapper, if provided, will be used to map GroupVersionKinds to Resources - Mapper meta.RESTMapper - - // Opts is used to configure the warning handler responsible for - // surfacing and handling warnings messages sent by the API server. - Opts WarningHandlerOptions +// CacheOptions are options for creating a cache-backed client. +type CacheOptions struct { + // Reader is a cache-backed reader that will be used to read objects from the cache. + // +required + Reader Reader + // DisableFor is a list of objects that should not be read from the cache. + DisableFor []Object + // Unstructured is a flag that indicates whether the cache-backed client should + // read unstructured objects or lists from the cache. + Unstructured bool } +// NewClientFunc allows a user to define how to create a client. +type NewClientFunc func(config *rest.Config, options Options) (Client, error) + // New returns a new Client using the provided config and Options. // The returned client reads *and* writes directly from the server // (it doesn't use object caches). It understands how to work with @@ -73,8 +97,12 @@ type Options struct { // corresponding group, version, and kind for the given type. In the // case of unstructured types, the group, version, and kind will be extracted // from the corresponding fields on the object. -func New(config *rest.Config, options Options) (Client, error) { - return newClient(config, options) +func New(config *rest.Config, options Options) (c Client, err error) { + c, err = newClient(config, options) + if err == nil && options.DryRun != nil && *options.DryRun { + c = NewDryRunClient(c) + } + return c, err } func newClient(config *rest.Config, options Options) (*client, error) { @@ -82,22 +110,35 @@ func newClient(config *rest.Config, options Options) (*client, error) { return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") } - if !options.Opts.SuppressWarnings { + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + if !options.WarningHandler.SuppressWarnings { // surface warnings logger := log.Log.WithName("KubeAPIWarningLogger") // Set a WarningHandler, the default WarningHandler // is log.KubeAPIWarningLogger with deduplication enabled. // See log.KubeAPIWarningLoggerOptions for considerations // regarding deduplication. 
- config = rest.CopyConfig(config) config.WarningHandler = log.NewKubeAPIWarningLogger( logger, log.KubeAPIWarningLoggerOptions{ - Deduplicate: !options.Opts.AllowDuplicateLogs, + Deduplicate: !options.WarningHandler.AllowDuplicateLogs, }, ) } + // Use the rest HTTP client for the provided config if unset + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + } + // Init a scheme if none provided if options.Scheme == nil { options.Scheme = scheme.Scheme @@ -106,34 +147,35 @@ func newClient(config *rest.Config, options Options) (*client, error) { // Init a Mapper if none provided if options.Mapper == nil { var err error - options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + options.Mapper, err = apiutil.NewDynamicRESTMapper(config, options.HTTPClient) if err != nil { return nil, err } } - clientcache := &clientCache{ - config: config, - scheme: options.Scheme, - mapper: options.Mapper, - codecs: serializer.NewCodecFactory(options.Scheme), + resources := &clientRestResources{ + httpClient: options.HTTPClient, + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), } - rawMetaClient, err := metadata.NewForConfig(config) + rawMetaClient, err := metadata.NewForConfigAndClient(metadata.ConfigFor(config), options.HTTPClient) if err != nil { return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) } c := &client{ typedClient: typedClient{ - cache: clientcache, + resources: resources, paramCodec: runtime.NewParameterCodec(options.Scheme), }, unstructuredClient: unstructuredClient{ - cache: clientcache, + resources: resources, paramCodec: noConversionParamCodec{}, }, metadataClient: metadataClient{ @@ -143,20 +185,65 @@ func newClient(config *rest.Config, options Options) (*client, error) { scheme: options.Scheme, mapper: options.Mapper, } + if options.Cache == nil || options.Cache.Reader == nil { + return c, nil + } + // We want a cache if we're here. + // Set the cache. + c.cache = options.Cache.Reader + + // Load uncached GVKs. + c.cacheUnstructured = options.Cache.Unstructured + c.uncachedGVKs = map[schema.GroupVersionKind]struct{}{} + for _, obj := range options.Cache.DisableFor { + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return nil, err + } + c.uncachedGVKs[gvk] = struct{}{} + } return c, nil } var _ Client = &client{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. +// client is a client.Client that reads and writes directly from/to an API server. +// It lazily initializes new clients at the time they are used. type client struct { typedClient typedClient unstructuredClient unstructuredClient metadataClient metadataClient scheme *runtime.Scheme mapper meta.RESTMapper + + cache Reader + uncachedGVKs map[schema.GroupVersionKind]struct{} + cacheUnstructured bool +} + +func (c *client) shouldBypassCache(obj runtime.Object) (bool, error) { + if c.cache == nil { + return true, nil + } + + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. 
+ if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := c.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !c.cacheUnstructured { + _, isUnstructured := obj.(runtime.Unstructured) + return isUnstructured, nil + } + return false, nil } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. @@ -168,6 +255,16 @@ func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersi } } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *client) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *client) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.mapper) +} + // Scheme returns the scheme this client is using. func (c *client) Scheme() *runtime.Scheme { return c.scheme @@ -181,7 +278,7 @@ func (c *client) RESTMapper() meta.RESTMapper { // Create implements client.Client. func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Create(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot create using only metadata") @@ -194,7 +291,7 @@ func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) e func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Update(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") @@ -206,7 +303,7 @@ func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) e // Delete implements client.Client. func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Delete(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Delete(ctx, obj, opts...) @@ -218,7 +315,7 @@ func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) e // DeleteAllOf implements client.Client. func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.DeleteAllOf(ctx, obj, opts...) @@ -231,7 +328,7 @@ func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllO func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Patch(ctx, obj, patch, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Patch(ctx, obj, patch, opts...) 
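// --- Reviewer note (not part of the vendored diff): a minimal sketch of how a caller
// might construct a client against the new Options shape introduced in the hunks above,
// assuming the controller-runtime version vendored here. The cacheReader parameter and
// the choice of corev1.Secret as an uncached type are illustrative assumptions only.
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

func newCachedClient(cfg *rest.Config, cacheReader client.Reader) (client.Client, error) {
	return client.New(cfg, client.Options{
		Scheme: scheme.Scheme,
		// Cache replaces the removed NewDelegatingClient helper: reads are served from
		// the cache reader, writes always go directly to the API server.
		Cache: &client.CacheOptions{
			Reader:     cacheReader,
			DisableFor: []client.Object{&corev1.Secret{}}, // always read Secrets live
		},
		// WarningHandler is the renamed Opts field for API server warning handling.
		WarningHandler: client.WarningHandlerOptions{AllowDuplicateLogs: false},
	})
}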
@@ -242,8 +339,14 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat // Get implements client.Client. func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.Get(ctx, key, obj, opts...) + } + switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Get(ctx, key, obj, opts...) case *metav1.PartialObjectMetadata: // Metadata only object should always preserve the GVK coming in from the caller. @@ -256,8 +359,14 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...Get // List implements client.Client. func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.List(ctx, obj, opts...) + } + switch x := obj.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return c.unstructuredClient.List(ctx, obj, opts...) case *metav1.PartialObjectMetadataList: // Metadata only object should always preserve the GVK. @@ -431,7 +540,7 @@ func (po *SubResourcePatchOptions) ApplyToSubResourcePatch(o *SubResourcePatchOp func (sc *subResourceClient) Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return errors.New("can not get subresource using only metadata") @@ -446,7 +555,7 @@ func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource defer sc.client.resetGroupVersionKind(subResource, subResource.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") @@ -459,7 +568,7 @@ func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.UpdateSubResource(ctx, obj, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") @@ -472,7 +581,7 @@ func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...Sub func (sc *subResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) case *metav1.PartialObjectMetadata: return sc.client.metadataClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) 
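// --- Reviewer note (not part of the vendored diff): the apiutil constructors changed
// signature in this bump and now require a non-nil *http.Client (see the apimachinery.go
// and restmapper.go hunks above); client_rest_resources.go below forwards that client to
// RESTClientForGVK. A minimal sketch of deriving the HTTP client from a rest.Config via
// rest.HTTPClientFor; function and variable names are illustrative assumptions only.
package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func newLazyMapper(cfg *rest.Config) (meta.RESTMapper, error) {
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		return nil, err
	}
	// The dynamic mapper now discovers API groups lazily on first lookup and reuses
	// the provided HTTP client for its discovery requests.
	return apiutil.NewDynamicRESTMapper(cfg, httpClient)
}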
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go similarity index 82% rename from vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go index 857a0b38a7..2d07879520 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go @@ -17,12 +17,12 @@ limitations under the License. package client import ( + "net/http" "strings" "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -30,8 +30,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientCache creates and caches rest clients and metadata for Kubernetes types. -type clientCache struct { +// clientRestResources creates and stores rest clients and metadata for Kubernetes types. +type clientRestResources struct { + // httpClient is the http client to use for requests + httpClient *http.Client + // config is the rest.Config to talk to an apiserver config *rest.Config @@ -44,22 +47,22 @@ type clientCache struct { // codecs are used to create a REST client for a gvk codecs serializer.CodecFactory - // structuredResourceByType caches structured type metadata + // structuredResourceByType stores structured type metadata structuredResourceByType map[schema.GroupVersionKind]*resourceMeta - // unstructuredResourceByType caches unstructured type metadata + // unstructuredResourceByType stores unstructured type metadata unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta mu sync.RWMutex } // newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. // If the object is a list, the resource represents the item's type instead. -func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { +func (c *clientRestResources) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { if strings.HasSuffix(gvk.Kind, "List") && isList { // if this was a list, treat it as a request for the item's resource gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] } - client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs, c.httpClient) if err != nil { return nil, err } @@ -72,15 +75,13 @@ func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstruc // getResource returns the resource meta information for the given type of object. // If the object is a list, the resource represents the item's type instead. 
-func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { +func (c *clientRestResources) getResource(obj runtime.Object) (*resourceMeta, error) { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return nil, err } - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - isUnstructured = isUnstructured || isUnstructuredList + _, isUnstructured := obj.(runtime.Unstructured) // It's better to do creation work twice than to not let multiple // people make requests at once @@ -108,7 +109,7 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { } // getObjMeta returns objMeta containing both type and object metadata and state. -func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { +func (c *clientRestResources) getObjMeta(obj runtime.Object) (*objMeta, error) { r, err := c.getResource(obj) if err != nil { return nil, err @@ -120,7 +121,7 @@ func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { return &objMeta{resourceMeta: r, Object: m}, err } -// resourceMeta caches state for a Kubernetes type. +// resourceMeta stores state for a Kubernetes type. type resourceMeta struct { // client is the rest client used to talk to the apiserver rest.Interface diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go index e4e8585cb0..5f0a6d4b1d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -98,12 +98,12 @@ func GetConfigWithContext(context string) (*rest.Config, error) { if err != nil { return nil, err } - if cfg.QPS == 0.0 { cfg.QPS = 20.0 - cfg.Burst = 30.0 } - + if cfg.Burst == 0 { + cfg.Burst = 30 + } return cfg, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go index e0e2885094..b2e2024942 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -26,8 +26,7 @@ limitations under the License. // to the API server. // // It is a common pattern in Kubernetes to read from a cache and write to the API -// server. This pattern is covered by the DelegatingClient type, which can -// be used to have a client whose Reader is different from the Writer. +// server. This pattern is covered by the creating the Client with a Cache. // // # Options // diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go index 73b56429e7..bbcdd38321 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewDryRunClient wraps an existing client and enforces DryRun mode @@ -46,6 +47,16 @@ func (c *dryRunClient) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *dryRunClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. 
+func (c *dryRunClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + // Create implements client.Client. func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { return c.client.Create(ctx, obj, append(opts, DryRunAll)...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index b642f7f88f..0ddda3163d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -20,6 +20,7 @@ import ( "context" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -169,6 +170,10 @@ type Client interface { Scheme() *runtime.Scheme // RESTMapper returns the rest this client is using. RESTMapper() meta.RESTMapper + // GroupVersionKindFor returns the GroupVersionKind for the given object. + GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) + // IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. + IsObjectNamespaced(obj runtime.Object) (bool, error) } // WithWatch supports Watch on top of the CRUD operations supported by diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index 00bc2175ce..222dc79579 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewNamespacedClient wraps an existing client enforcing the namespace value. @@ -52,9 +52,19 @@ func (n *namespacedClient) RESTMapper() meta.RESTMapper { return n.client.RESTMapper() } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (n *namespacedClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return n.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (n *namespacedClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return n.client.IsObjectNamespaced(obj) +} + // Create implements client.Client. func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -72,7 +82,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat // Update implements client.Client. func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -90,7 +100,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat // Delete implements client.Client. 
func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -108,7 +118,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet // DeleteAllOf implements client.Client. func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -121,7 +131,7 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ... // Patch implements client.Client. func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -139,7 +149,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o // Get implements client.Client. func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -180,7 +190,7 @@ type namespacedClientSubResourceClient struct { } func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -198,7 +208,7 @@ func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subR } func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -217,7 +227,7 @@ func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, s // Update implements client.SubResourceWriter. func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -235,8 +245,7 @@ func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Ob // Patch implements client.SubResourceWriter. 
func (nsw *namespacedClientSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) - + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 7f6f5b83ff..d81bf25de9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -513,8 +513,15 @@ type MatchingLabels map[string]string // ApplyToList applies this configuration to the given list options. func (m MatchingLabels) ApplyToList(opts *ListOptions) { // TODO(directxman12): can we avoid reserializing this over and over? - sel := labels.SelectorFromValidatedSet(map[string]string(m)) - opts.LabelSelector = sel + if opts.LabelSelector == nil { + opts.LabelSelector = labels.NewSelector() + } + // If there's already a selector, we need to AND the two together. + noValidSel := labels.SelectorFromValidatedSet(map[string]string(m)) + reqs, _ := noValidSel.Requirements() + for _, req := range reqs { + opts.LabelSelector = opts.LabelSelector.Add(req) + } } // ApplyToDeleteAllOf applies this configuration to the given an List options. @@ -528,14 +535,17 @@ type HasLabels []string // ApplyToList applies this configuration to the given list options. func (m HasLabels) ApplyToList(opts *ListOptions) { - sel := labels.NewSelector() + if opts.LabelSelector == nil { + opts.LabelSelector = labels.NewSelector() + } + // TODO: ignore invalid labels will result in an empty selector. + // This is inconsistent to the that of MatchingLabels. for _, label := range m { r, err := labels.NewRequirement(label, selection.Exists, nil) if err == nil { - sel = sel.Add(*r) + opts.LabelSelector = opts.LabelSelector.Add(*r) } } - opts.LabelSelector = sel } // ApplyToDeleteAllOf applies this configuration to the given an List options. @@ -606,6 +616,11 @@ func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { n.ApplyToList(&opts.ListOptions) } +// AsSelector returns a selector that matches objects in the given namespace. +func (n InNamespace) AsSelector() fields.Selector { + return fields.SelectorFromSet(fields.Set{"metadata.namespace": string(n)}) +} + // Limit specifies the maximum number of results to return from the server. // Limit does not implement DeleteAllOfOption interface because the server // does not support setting it for deletecollection operations. @@ -788,6 +803,11 @@ func (forceOwnership) ApplyToPatch(opts *PatchOptions) { opts.Force = &definitelyTrue } +func (forceOwnership) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + // }}} // {{{ DeleteAllOf Options diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go deleted file mode 100644 index 19d1ab4db7..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "context" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. -type NewDelegatingClientInput struct { - CacheReader Reader - Client Client - UncachedObjects []Object - CacheUnstructured bool -} - -// NewDelegatingClient creates a new delegating client. -// -// A delegating client forms a Client by composing separate reader, writer and -// statusclient interfaces. This way, you can have an Client that reads from a -// cache and writes to the API server. -func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { - uncachedGVKs := map[schema.GroupVersionKind]struct{}{} - for _, obj := range in.UncachedObjects { - gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) - if err != nil { - return nil, err - } - uncachedGVKs[gvk] = struct{}{} - } - - return &delegatingClient{ - scheme: in.Client.Scheme(), - mapper: in.Client.RESTMapper(), - Reader: &delegatingReader{ - CacheReader: in.CacheReader, - ClientReader: in.Client, - scheme: in.Client.Scheme(), - uncachedGVKs: uncachedGVKs, - cacheUnstructured: in.CacheUnstructured, - }, - Writer: in.Client, - StatusClient: in.Client, - SubResourceClientConstructor: in.Client, - }, nil -} - -type delegatingClient struct { - Reader - Writer - StatusClient - SubResourceClientConstructor - - scheme *runtime.Scheme - mapper meta.RESTMapper -} - -// Scheme returns the scheme this client is using. -func (d *delegatingClient) Scheme() *runtime.Scheme { - return d.scheme -} - -// RESTMapper returns the rest mapper this client is using. -func (d *delegatingClient) RESTMapper() meta.RESTMapper { - return d.mapper -} - -// delegatingReader forms a Reader that will cause Get and List requests for -// unstructured types to use the ClientReader while requests for any other type -// of object with use the CacheReader. This avoids accidentally caching the -// entire cluster in the common case of loading arbitrary unstructured objects -// (e.g. from OwnerReferences). -type delegatingReader struct { - CacheReader Reader - ClientReader Reader - - uncachedGVKs map[schema.GroupVersionKind]struct{} - scheme *runtime.Scheme - cacheUnstructured bool -} - -func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, d.scheme) - if err != nil { - return false, err - } - // TODO: this is producing unsafe guesses that don't actually work, - // but it matches ~99% of the cases out there. 
- if meta.IsListType(obj) { - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - } - if _, isUncached := d.uncachedGVKs[gvk]; isUncached { - return true, nil - } - if !d.cacheUnstructured { - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - return isUnstructured || isUnstructuredList, nil - } - return false, nil -} - -// Get retrieves an obj for a given object key from the Kubernetes Cluster. -func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - if isUncached, err := d.shouldBypassCache(obj); err != nil { - return err - } else if isUncached { - return d.ClientReader.Get(ctx, key, obj, opts...) - } - return d.CacheReader.Get(ctx, key, obj, opts...) -} - -// List retrieves list of objects for a given namespace and list options. -func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { - if isUncached, err := d.shouldBypassCache(list); err != nil { - return err - } else if isUncached { - return d.ClientReader.List(ctx, list, opts...) - } - return d.CacheReader.List(ctx, list, opts...) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index ade251572b..92afd9a9c2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -25,16 +25,14 @@ import ( var _ Reader = &typedClient{} var _ Writer = &typedClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. type typedClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -53,7 +51,7 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti // Update implements client.Client. func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -73,7 +71,7 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti // Delete implements client.Client. func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -92,7 +90,7 @@ func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOpti // DeleteAllOf implements client.Client. func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -111,7 +109,7 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...Delet // Patch implements client.Client. func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -136,7 +134,7 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts . 
// Get implements client.Client. func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - r, err := c.cache.getResource(obj) + r, err := c.resources.getResource(obj) if err != nil { return err } @@ -151,7 +149,7 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts . // List implements client.Client. func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - r, err := c.cache.getResource(obj) + r, err := c.resources.getResource(obj) if err != nil { return err } @@ -168,7 +166,7 @@ func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOpti } func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -191,7 +189,7 @@ func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Ob } func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -216,7 +214,7 @@ func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subReso // UpdateSubResource used by SubResourceWriter to write status. func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -251,7 +249,7 @@ func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subReso // PatchSubResource used by SubResourceWriter to write subresource. func (c *typedClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index 7f25c7be90..0d96951780 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -21,30 +21,27 @@ import ( "fmt" "strings" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" ) var _ Reader = &unstructuredClient{} var _ Writer = &unstructuredClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. type unstructuredClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. 
func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -60,20 +57,20 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Update implements client.Client. func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -90,17 +87,17 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Delete implements client.Client. func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -119,11 +116,11 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De // DeleteAllOf implements client.Client. func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -142,11 +139,11 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts // Patch implements client.Client. func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -171,17 +168,17 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch // Get implements client.Client. 
func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() getOpts := GetOptions{} getOpts.ApplyOptions(opts) - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } @@ -194,22 +191,22 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // List implements client.Client. func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - u, ok := obj.(*unstructured.UnstructuredList) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } @@ -226,19 +223,19 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ... } func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", subResource) + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) } - if _, ok := subResourceObj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", obj) + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) } if subResourceObj.GetName() == "" { subResourceObj.SetName(obj.GetName()) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -257,19 +254,19 @@ func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResour } func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) } - if _, ok := subResourceObj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", obj) + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) } if subResourceObj.GetName() == "" { subResourceObj.SetName(obj.GetName()) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -289,11 +286,11 @@ func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subRes } func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { - if _, ok := 
obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -324,14 +321,14 @@ func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, } func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -359,6 +356,6 @@ func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, Do(ctx). Into(body) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go index 70490664bd..181b22a673 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go @@ -21,9 +21,8 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ) @@ -33,21 +32,16 @@ func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { if err != nil { return nil, err } - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return nil, err - } - return &watchingClient{client: client, dynamic: dynamicClient}, nil + return &watchingClient{client: client}, nil } type watchingClient struct { *client - dynamic dynamic.Interface } func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { switch l := list.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return w.unstructuredWatch(ctx, l, opts...) case *metav1.PartialObjectMetadataList: return w.metadataWatch(ctx, l, opts...) @@ -81,25 +75,23 @@ func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialO return resInt.Watch(ctx, *listOpts.AsListOptions()) } -func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { - gvk := obj.GroupVersionKind() - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - - r, err := w.client.unstructuredClient.cache.getResource(obj) +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj runtime.Unstructured, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.unstructuredClient.resources.getResource(obj) if err != nil { return nil, err } listOpts := w.listOpts(opts...) - if listOpts.Namespace != "" && r.isNamespaced() { - return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) - } - return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). 
+ VersionedParams(listOpts.AsListOptions(), w.client.unstructuredClient.paramCodec). + Watch(ctx) } func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { - r, err := w.client.typedClient.cache.getResource(obj) + r, err := w.client.typedClient.resources.getResource(obj) if err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go index 905296cd35..7ab76555b3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -19,6 +19,7 @@ package cluster import ( "context" "errors" + "net/http" "time" "github.com/go-logr/logr" @@ -27,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" @@ -37,14 +39,15 @@ import ( // Cluster provides various methods to interact with a cluster. type Cluster interface { - // SetFields will set any dependencies on an object for which the object has implemented the inject - // interface - e.g. inject.Client. - // Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. - SetFields(interface{}) error + // GetHTTPClient returns an HTTP client that can be used to talk to the apiserver + GetHTTPClient() *http.Client // GetConfig returns an initialized Config GetConfig() *rest.Config + // GetCache returns a cache.Cache + GetCache() cache.Cache + // GetScheme returns an initialized Scheme GetScheme() *runtime.Scheme @@ -57,9 +60,6 @@ type Cluster interface { // GetFieldIndexer returns a client.FieldIndexer configured with the client GetFieldIndexer() client.FieldIndexer - // GetCache returns a cache.Cache - GetCache() cache.Cache - // GetEventRecorderFor returns a new EventRecorder for the provided name GetEventRecorderFor(name string) record.EventRecorder @@ -83,7 +83,7 @@ type Options struct { Scheme *runtime.Scheme // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs - MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) // Logger is the logger that should be used by this Cluster. // If none is set, it defaults to log.Log global logger. @@ -103,24 +103,54 @@ type Options struct { // Note: If a namespace is specified, controllers can still Watch for a // cluster-scoped resource (e.g Node). For namespaced resources the cache // will only hold objects from the desired namespace. + // + // Deprecated: Use Cache.Namespaces instead. Namespace string + // HTTPClient is the http client that will be used to create the default + // Cache and Client. If not set the rest.HTTPClientFor function will be used + // to create the http client. + HTTPClient *http.Client + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + // NewCache is the function that will create the cache to be used // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! 
+ // Only use a custom NewCache if you know what you are doing. NewCache cache.NewCacheFunc + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + // NewClient is the func that creates the client to be used by the manager. - // If not set this will create the default DelegatingClient that will - // use the cache for reads and the client for writes. - // NOTE: The default client will not cache Unstructured. - NewClient NewClientFunc + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it // for the given objects. + // + // Deprecated: Use Client.Cache.DisableFor instead. ClientDisableCacheFor []client.Object // DryRunClient specifies whether the client should be configured to enforce // dryRun mode. + // + // Deprecated: Use Client.DryRun instead. DryRunClient bool // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API @@ -137,7 +167,7 @@ type Options struct { makeBroadcaster intrec.EventBroadcasterProducer // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) } // Option can be used to manipulate Options. 
@@ -149,56 +179,116 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { return nil, errors.New("must specify Config") } + originalConfig := config + + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + options := Options{} for _, opt := range opts { opt(&options) } - options = setOptionsDefaults(options) + options, err := setOptionsDefaults(options, config) + if err != nil { + options.Logger.Error(err, "Failed to set defaults") + return nil, err + } // Create the mapper provider - mapper, err := options.MapperProvider(config) + mapper, err := options.MapperProvider(config, options.HTTPClient) if err != nil { options.Logger.Error(err, "Failed to get API Group-Resources") return nil, err } // Create the cache for the cached read client and registering informers - cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace}) + cacheOpts := options.Cache + { + if cacheOpts.Scheme == nil { + cacheOpts.Scheme = options.Scheme + } + if cacheOpts.Mapper == nil { + cacheOpts.Mapper = mapper + } + if cacheOpts.HTTPClient == nil { + cacheOpts.HTTPClient = options.HTTPClient + } + if cacheOpts.SyncPeriod == nil { + cacheOpts.SyncPeriod = options.SyncPeriod + } + if len(cacheOpts.Namespaces) == 0 && options.Namespace != "" { + cacheOpts.Namespaces = []string{options.Namespace} + } + } + cache, err := options.NewCache(config, cacheOpts) if err != nil { return nil, err } - clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper} + // Create the client, and default its options. + clientOpts := options.Client + { + if clientOpts.Scheme == nil { + clientOpts.Scheme = options.Scheme + } + if clientOpts.Mapper == nil { + clientOpts.Mapper = mapper + } + if clientOpts.HTTPClient == nil { + clientOpts.HTTPClient = options.HTTPClient + } + if clientOpts.Cache == nil { + clientOpts.Cache = &client.CacheOptions{ + Unstructured: false, + } + } + if clientOpts.Cache.Reader == nil { + clientOpts.Cache.Reader = cache + } + + // For backward compatibility, the ClientDisableCacheFor option should + // be appended to the DisableFor option in the client. + clientOpts.Cache.DisableFor = append(clientOpts.Cache.DisableFor, options.ClientDisableCacheFor...) - apiReader, err := client.New(config, clientOptions) + if clientOpts.DryRun == nil && options.DryRunClient { + // For backward compatibility, the DryRunClient (if set) option should override + // the DryRun option in the client (if unset). + clientOpts.DryRun = pointer.Bool(true) + } + } + clientWriter, err := options.NewClient(config, clientOpts) if err != nil { return nil, err } - writeObj, err := options.NewClient(cache, config, clientOptions, options.ClientDisableCacheFor...) + // Create the API Reader, a client with no cache. + clientReader, err := client.New(config, client.Options{ + HTTPClient: options.HTTPClient, + Scheme: options.Scheme, + Mapper: mapper, + }) if err != nil { return nil, err } - if options.DryRunClient { - writeObj = client.NewDryRunClient(writeObj) - } - // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. 
- recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) + recorderProvider, err := options.newRecorderProvider(config, options.HTTPClient, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } return &cluster{ - config: config, + config: originalConfig, + httpClient: options.HTTPClient, scheme: options.Scheme, cache: cache, fieldIndexes: cache, - client: writeObj, - apiReader: apiReader, + client: clientWriter, + apiReader: clientReader, recorderProvider: recorderProvider, mapper: mapper, logger: options.Logger, @@ -206,21 +296,27 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { } // setOptionsDefaults set default values for Options fields. -func setOptionsDefaults(options Options) Options { +func setOptionsDefaults(options Options, config *rest.Config) (Options, error) { + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return options, err + } + } + // Use the Kubernetes client-go scheme if none is specified if options.Scheme == nil { options.Scheme = scheme.Scheme } if options.MapperProvider == nil { - options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { - return apiutil.NewDynamicRESTMapper(c) - } + options.MapperProvider = apiutil.NewDynamicRESTMapper } // Allow users to define how to create a new client if options.NewClient == nil { - options.NewClient = DefaultNewClient + options.NewClient = client.New } // Allow newCache to be mocked @@ -250,39 +346,5 @@ func setOptionsDefaults(options Options) Options { options.Logger = logf.RuntimeLog.WithName("cluster") } - return options -} - -// NewClientFunc allows a user to define how to create a client. -type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) - -// ClientOptions are the optional arguments for tuning the caching client. -type ClientOptions struct { - UncachedObjects []client.Object - CacheUnstructured bool -} - -// DefaultNewClient creates the default caching client, that will never cache Unstructured. -func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { - return ClientBuilderWithOptions(ClientOptions{})(cache, config, options, uncachedObjects...) -} - -// ClientBuilderWithOptions returns a Client constructor that will build a client -// honoring the options argument -func ClientBuilderWithOptions(options ClientOptions) NewClientFunc { - return func(cache cache.Cache, config *rest.Config, clientOpts client.Options, uncachedObjects ...client.Object) (client.Client, error) { - options.UncachedObjects = append(options.UncachedObjects, uncachedObjects...) 
- - c, err := client.New(config, clientOpts) - if err != nil { - return nil, err - } - - return client.NewDelegatingClient(client.NewDelegatingClientInput{ - CacheReader: cache, - Client: c, - UncachedObjects: options.UncachedObjects, - CacheUnstructured: options.CacheUnstructured, - }) - } + return options, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go index 125e1d144e..2742764231 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go @@ -18,6 +18,7 @@ package cluster import ( "context" + "net/http" "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/meta" @@ -28,22 +29,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) type cluster struct { // config is the rest.config used to talk to the apiserver. Required. config *rest.Config - // scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults - // to scheme.scheme. - scheme *runtime.Scheme - - cache cache.Cache - - // TODO(directxman12): Provide an escape hatch to get individual indexers - // client is the client injected into Controllers (and EventHandlers, Sources and Predicates). - client client.Client + httpClient *http.Client + scheme *runtime.Scheme + cache cache.Cache + client client.Client // apiReader is the reader that will make requests to the api server and not the cache. apiReader client.Reader @@ -64,32 +59,14 @@ type cluster struct { logger logr.Logger } -func (c *cluster) SetFields(i interface{}) error { - if _, err := inject.ConfigInto(c.config, i); err != nil { - return err - } - if _, err := inject.ClientInto(c.client, i); err != nil { - return err - } - if _, err := inject.APIReaderInto(c.apiReader, i); err != nil { - return err - } - if _, err := inject.SchemeInto(c.scheme, i); err != nil { - return err - } - if _, err := inject.CacheInto(c.cache, i); err != nil { - return err - } - if _, err := inject.MapperInto(c.mapper, i); err != nil { - return err - } - return nil -} - func (c *cluster) GetConfig() *rest.Config { return c.config } +func (c *cluster) GetHTTPClient() *http.Client { + return c.httpClient +} + func (c *cluster) GetClient() client.Client { return c.client } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go index 8e853d6a0f..9c7b875a86 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go @@ -29,6 +29,8 @@ import ( // ControllerManagerConfiguration defines the functions necessary to parse a config file // and to configure the Options struct for the ctrl.Manager. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfiguration interface { runtime.Object @@ -38,6 +40,8 @@ type ControllerManagerConfiguration interface { // DeferredFileLoader is used to configure the decoder for loading controller // runtime component config types. 
+// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type DeferredFileLoader struct { ControllerManagerConfiguration path string @@ -52,6 +56,8 @@ type DeferredFileLoader struct { // Defaults: // * Path: "./config.yaml" // * Kind: GenericControllerManagerConfiguration +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. func File() *DeferredFileLoader { scheme := runtime.NewScheme() utilruntime.Must(v1alpha1.AddToScheme(scheme)) @@ -83,12 +89,6 @@ func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *Deferre return d } -// InjectScheme will configure the scheme to be used for decoding the file. -func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error { - d.scheme = scheme - return nil -} - // loadFile is used from the mutex.Once to load the file. func (d *DeferredFileLoader) loadFile() { if d.scheme == nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go new file mode 100644 index 0000000000..b37dffaeea --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import "time" + +// Controller contains configuration options for a controller. +type Controller struct { + // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation + // allowed for that controller. + // + // When a controller is registered within this manager using the builder utilities, + // users have to specify the type the controller reconciles in the For(...) call. + // If the object's kind passed matches one of the keys in this map, the concurrency + // for that controller is set to the number specified. + // + // The key is expected to be consistent in form with GroupKind.String(), + // e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`. + GroupKindConcurrency map[string]int + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to the Controller.RecoverPanic setting from the Manager if unset. + RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. 
+ // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go index a169ec5597..47a5a2f1d7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go @@ -14,12 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package config contains functionality for interacting with ComponentConfig -// files -// -// # DeferredFileLoader -// -// This uses a deferred file decoding allowing you to chain your configuration -// setup. You can pass this into manager.Options#File and it will load your -// config. +// Package config contains functionality for interacting with +// configuration for controller-runtime components. package config diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go index 1e3adbafb8..8fdf14d39a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go @@ -17,4 +17,6 @@ limitations under the License. // Package v1alpha1 provides the ControllerManagerConfiguration used for // configuring ctrl.Manager // +kubebuilder:object:generate=true +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. package v1alpha1 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go index 9efdbc0668..ca854bcf30 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go @@ -23,12 +23,18 @@ import ( var ( // GroupVersion is group version used to register these objects. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. 
AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go index f2226278c6..52c8ab300f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go @@ -25,6 +25,8 @@ import ( ) // ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfigurationSpec struct { // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. A lower period will correct entropy more quickly, but reduce @@ -60,7 +62,7 @@ type ControllerManagerConfigurationSpec struct { // +optional Controller *ControllerConfigurationSpec `json:"controller,omitempty"` - // Metrics contains thw controller metrics configuration + // Metrics contains the controller metrics configuration // +optional Metrics ControllerMetrics `json:"metrics,omitempty"` @@ -75,6 +77,11 @@ type ControllerManagerConfigurationSpec struct { // ControllerConfigurationSpec defines the global configuration for // controllers registered with the manager. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. +// +// Deprecated: Controller global configuration can now be set at the manager level, +// using the manager.Options.Controller field. type ControllerConfigurationSpec struct { // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation // allowed for that controller. @@ -101,6 +108,8 @@ type ControllerConfigurationSpec struct { } // ControllerMetrics defines the metrics configs. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerMetrics struct { // BindAddress is the TCP address that the controller should bind to // for serving prometheus metrics. @@ -110,6 +119,8 @@ type ControllerMetrics struct { } // ControllerHealth defines the health configs. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerHealth struct { // HealthProbeBindAddress is the TCP address that the controller should bind to // for serving health probes @@ -127,6 +138,8 @@ type ControllerHealth struct { } // ControllerWebhook defines the webhook server for the controller. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerWebhook struct { // Port is the port that the webhook server serves at. 
// It is used to set webhook.Server.Port. @@ -149,6 +162,8 @@ type ControllerWebhook struct { // +kubebuilder:object:root=true // ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfiguration struct { metav1.TypeMeta `json:",inline"` @@ -157,6 +172,8 @@ type ControllerManagerConfiguration struct { } // Complete returns the configuration for controller-runtime. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { return *c, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index fe7f94fdc1..6732b6f709 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -39,6 +39,18 @@ type Options struct { // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. MaxConcurrentReconciles int + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to the Controller.RecoverPanic setting from the Manager if unset. + RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. + // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool + // Reconciler reconciles an object Reconciler reconcile.Reconciler @@ -50,14 +62,6 @@ type Options struct { // LogConstructor is used to construct a logger used for this controller and passed // to each reconciliation via the context field. LogConstructor func(request *reconcile.Request) logr.Logger - - // CacheSyncTimeout refers to the time limit set to wait for syncing caches. - // Defaults to 2 minutes if not set. - CacheSyncTimeout time.Duration - - // RecoverPanic indicates whether the panic caused by reconcile should be recovered. - // Defaults to the Controller.RecoverPanic setting from the Manager if unset. - RecoverPanic *bool } // Controller implements a Kubernetes API. 
A Controller manages a work queue fed reconcile.Requests @@ -124,26 +128,33 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller } if options.MaxConcurrentReconciles <= 0 { - options.MaxConcurrentReconciles = 1 + if mgr.GetControllerOptions().MaxConcurrentReconciles > 0 { + options.MaxConcurrentReconciles = mgr.GetControllerOptions().MaxConcurrentReconciles + } else { + options.MaxConcurrentReconciles = 1 + } } if options.CacheSyncTimeout == 0 { - options.CacheSyncTimeout = 2 * time.Minute + if mgr.GetControllerOptions().CacheSyncTimeout != 0 { + options.CacheSyncTimeout = mgr.GetControllerOptions().CacheSyncTimeout + } else { + options.CacheSyncTimeout = 2 * time.Minute + } } if options.RateLimiter == nil { options.RateLimiter = workqueue.DefaultControllerRateLimiter() } - // Inject dependencies into Reconciler - if err := mgr.SetFields(options.Reconciler); err != nil { - return nil, err - } - if options.RecoverPanic == nil { options.RecoverPanic = mgr.GetControllerOptions().RecoverPanic } + if options.NeedLeaderElection == nil { + options.NeedLeaderElection = mgr.GetControllerOptions().NeedLeaderElection + } + // Create controller with dependencies set return &controller.Controller{ Do: options.Reconciler, @@ -152,10 +163,10 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller }, MaxConcurrentReconciles: options.MaxConcurrentReconciles, CacheSyncTimeout: options.CacheSyncTimeout, - SetFields: mgr.SetFields, Name: name, LogConstructor: options.LogConstructor, RecoverPanic: options.RecoverPanic, + LeaderElected: options.NeedLeaderElection, }, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go index e6d3a4eaab..c72b2e1ebb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -17,6 +17,8 @@ limitations under the License. package handler import ( + "context" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" @@ -36,7 +38,7 @@ var _ EventHandler = &EnqueueRequestForObject{} type EnqueueRequestForObject struct{} // Create implements EventHandler. -func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) return @@ -48,7 +50,7 @@ func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.Rate } // Update implements EventHandler. -func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { switch { case evt.ObjectNew != nil: q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ @@ -66,7 +68,7 @@ func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.Rate } // Delete implements EventHandler. 
-func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) return @@ -78,7 +80,7 @@ func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.Rate } // Generic implements EventHandler. -func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) return diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go index 17401b1fdb..b55fdde6ba 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -17,16 +17,17 @@ limitations under the License. package handler import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) // MapFunc is the signature required for enqueueing requests from a generic function. // This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. -type MapFunc func(client.Object) []reconcile.Request +type MapFunc func(context.Context, client.Object) []reconcile.Request // EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection // of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects @@ -52,32 +53,32 @@ type enqueueRequestsFromMapFunc struct { } // Create implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Update implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.ObjectOld, reqs) - e.mapAndEnqueue(q, evt.ObjectNew, reqs) + e.mapAndEnqueue(ctx, q, evt.ObjectOld, reqs) + e.mapAndEnqueue(ctx, q, evt.ObjectNew, reqs) } // Delete implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Generic implements EventHandler. 
-func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } -func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { - for _, req := range e.toRequests(object) { +func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { + for _, req := range e.toRequests(ctx, object) { _, ok := reqs[req] if !ok { q.Add(req) @@ -85,13 +86,3 @@ func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInter } } } - -// EnqueueRequestsFromMapFunc can inject fields into the mapper. - -// InjectFunc implements inject.Injector. -func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { - if f == nil { - return nil - } - return f(e.toRequests) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go index 63699893fc..02e7d756f8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -17,6 +17,7 @@ limitations under the License. package handler import ( + "context" "fmt" "k8s.io/apimachinery/pkg/api/meta" @@ -25,15 +26,18 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) -var _ EventHandler = &EnqueueRequestForOwner{} +var _ EventHandler = &enqueueRequestForOwner{} -var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") +var log = logf.RuntimeLog.WithName("eventhandler").WithName("enqueueRequestForOwner") + +// OwnerOption modifies an EnqueueRequestForOwner EventHandler. +type OwnerOption func(e *enqueueRequestForOwner) // EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created // the object that was the source of the Event. @@ -42,13 +46,34 @@ var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOw // // - a source.Kind Source with Type of Pod. // -// - a handler.EnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and IsController set to true. -type EnqueueRequestForOwner struct { - // OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. - OwnerType runtime.Object +// - a handler.enqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and OnlyControllerOwner set to true. +func EnqueueRequestForOwner(scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) EventHandler { + e := &enqueueRequestForOwner{ + ownerType: ownerType, + mapper: mapper, + } + if err := e.parseOwnerTypeGroupKind(scheme); err != nil { + panic(err) + } + for _, opt := range opts { + opt(e) + } + return e +} + +// OnlyControllerOwner if provided will only look at the first OwnerReference with Controller: true. 
+func OnlyControllerOwner() OwnerOption { + return func(e *enqueueRequestForOwner) { + e.isController = true + } +} - // IsController if set will only look at the first OwnerReference with Controller: true. - IsController bool +type enqueueRequestForOwner struct { + // ownerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. + ownerType runtime.Object + + // isController if set will only look at the first OwnerReference with Controller: true. + isController bool // groupKind is the cached Group and Kind from OwnerType groupKind schema.GroupKind @@ -58,7 +83,7 @@ type EnqueueRequestForOwner struct { } // Create implements EventHandler. -func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -67,7 +92,7 @@ func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateL } // Update implements EventHandler. -func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.ObjectOld, reqs) e.getOwnerReconcileRequest(evt.ObjectNew, reqs) @@ -77,7 +102,7 @@ func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateL } // Delete implements EventHandler. -func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -86,7 +111,7 @@ func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateL } // Generic implements EventHandler. -func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -96,17 +121,17 @@ func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.Rat // parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false // if the OwnerType could not be parsed using the scheme. -func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { +func (e *enqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { // Get the kinds of the type - kinds, _, err := scheme.ObjectKinds(e.OwnerType) + kinds, _, err := scheme.ObjectKinds(e.ownerType) if err != nil { - log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType)) + log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType)) return err } // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. 
if len(kinds) != 1 { - err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) - log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.ownerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType), "kinds", kinds) return err } // Cache the Group and Kind for the OwnerType @@ -116,7 +141,7 @@ func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) // getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile // owners of object that match e.OwnerType. -func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { +func (e *enqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { // Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested // by the user for _, ref := range e.getOwnersReferences(object) { @@ -138,7 +163,7 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, Name: ref.Name, }} - // if owner is not namespaced then we should set the namespace to the empty + // if owner is not namespaced then we should not set the namespace mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version) if err != nil { log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) @@ -153,16 +178,16 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, } } -// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner +// getOwnersReferences returns the OwnerReferences for an object as specified by the enqueueRequestForOwner // - if IsController is true: only take the Controller OwnerReference (if found) // - if IsController is false: take all OwnerReferences. -func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { +func (e *enqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { if object == nil { return nil } // If not filtered as Controller only, then use all the OwnerReferences - if !e.IsController { + if !e.isController { return object.GetOwnerReferences() } // If filtered to a Controller, only take the Controller OwnerReference @@ -172,18 +197,3 @@ func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []met // No Controller OwnerReference found return nil } - -var _ inject.Scheme = &EnqueueRequestForOwner{} - -// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForOwner. -func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error { - return e.parseOwnerTypeGroupKind(s) -} - -var _ inject.Mapper = &EnqueueRequestForOwner{} - -// InjectMapper is called by the Controller to provide the rest mapper used by the manager. 
-func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error { - e.mapper = m - return nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index 8652d22d72..2f380f4fc4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -17,6 +17,8 @@ limitations under the License. package handler import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" ) @@ -41,17 +43,17 @@ import ( // Most users shouldn't need to implement their own EventHandler. type EventHandler interface { // Create is called in response to an create event - e.g. Pod Creation. - Create(event.CreateEvent, workqueue.RateLimitingInterface) + Create(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event - e.g. Pod Updated. - Update(event.UpdateEvent, workqueue.RateLimitingInterface) + Update(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) // Delete is called in response to a delete event - e.g. Pod Deleted. - Delete(event.DeleteEvent, workqueue.RateLimitingInterface) + Delete(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or // external trigger request - e.g. reconcile Autoscaling, or a Webhook. - Generic(event.GenericEvent, workqueue.RateLimitingInterface) + Generic(context.Context, event.GenericEvent, workqueue.RateLimitingInterface) } var _ EventHandler = Funcs{} @@ -60,45 +62,45 @@ var _ EventHandler = Funcs{} type Funcs struct { // Create is called in response to an add event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - CreateFunc func(event.CreateEvent, workqueue.RateLimitingInterface) + CreateFunc func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - UpdateFunc func(event.UpdateEvent, workqueue.RateLimitingInterface) + UpdateFunc func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) // Delete is called in response to a delete event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - DeleteFunc func(event.DeleteEvent, workqueue.RateLimitingInterface) + DeleteFunc func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) // GenericFunc is called in response to a generic event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) + GenericFunc func(context.Context, event.GenericEvent, workqueue.RateLimitingInterface) } // Create implements EventHandler. -func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { if h.CreateFunc != nil { - h.CreateFunc(e, q) + h.CreateFunc(ctx, e, q) } } // Delete implements EventHandler. 
-func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { if h.DeleteFunc != nil { - h.DeleteFunc(e, q) + h.DeleteFunc(ctx, e, q) } } // Update implements EventHandler. -func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { if h.UpdateFunc != nil { - h.UpdateFunc(e, q) + h.UpdateFunc(ctx, e, q) } } // Generic implements EventHandler. -func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { if h.GenericFunc != nil { - h.GenericFunc(e, q) + h.GenericFunc(ctx, e, q) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index f7734695ce..83aba28cb7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -33,12 +33,9 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/source" ) -var _ inject.Injector = &Controller{} - // Controller implements controller.Controller. type Controller struct { // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. @@ -61,10 +58,6 @@ type Controller struct { // the Queue for processing Queue workqueue.RateLimitingInterface - // SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates - // Deprecated: the caller should handle injected fields itself. - SetFields func(i interface{}) error - // mu is used to synchronize Controller setup mu sync.Mutex @@ -93,6 +86,9 @@ type Controller struct { // RecoverPanic indicates whether the panic caused by reconcile should be recovered. RecoverPanic *bool + + // LeaderElected indicates whether the controller is leader elected or always running. + LeaderElected *bool } // watchDescription contains all the information necessary to start a watch. @@ -127,19 +123,6 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc c.mu.Lock() defer c.mu.Unlock() - // Inject Cache into arguments - if err := c.SetFields(src); err != nil { - return err - } - if err := c.SetFields(evthdler); err != nil { - return err - } - for _, pr := range prct { - if err := c.SetFields(pr); err != nil { - return err - } - } - // Controller hasn't started yet, store the watches locally and return. // // These watches are going to be held on the controller struct until the manager or user calls Start(...). @@ -152,6 +135,14 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc return src.Start(c.ctx, evthdler, c.Queue, prct...) } +// NeedLeaderElection implements the manager.LeaderElectionRunnable interface. +func (c *Controller) NeedLeaderElection() bool { + if c.LeaderElected == nil { + return true + } + return *c.LeaderElected +} + // Start implements controller.Controller. 
func (c *Controller) Start(ctx context.Context) error { // use an IIFE to get proper lock handling @@ -323,7 +314,11 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { result, err := c.Reconcile(ctx, req) switch { case err != nil: - c.Queue.AddRateLimited(req) + if errors.Is(err, reconcile.TerminalError(nil)) { + ctrlmetrics.TerminalReconcileErrors.WithLabelValues(c.Name).Inc() + } else { + c.Queue.AddRateLimited(req) + } ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() log.Error(err, "Reconciler error") @@ -351,12 +346,6 @@ func (c *Controller) GetLogger() logr.Logger { return c.LogConstructor(nil) } -// InjectFunc implement SetFields.Injector. -func (c *Controller) InjectFunc(f inject.Func) error { - c.SetFields = f - return nil -} - // updateMetrics updates prometheus metrics within the controller. func (c *Controller) updateMetrics(reconcileTime time.Duration) { ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go index baec669277..b74ce062be 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -39,6 +39,13 @@ var ( Help: "Total number of reconciliation errors per controller", }, []string{"controller"}) + // TerminalReconcileErrors is a prometheus counter metrics which holds the total + // number of terminal errors from the Reconciler. + TerminalReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_terminal_reconcile_errors_total", + Help: "Total number of terminal reconciliation errors per controller", + }, []string{"controller"}) + // ReconcileTime is a prometheus metric which keeps track of the duration // of reconciliations. ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ @@ -67,6 +74,7 @@ func init() { metrics.Registry.MustRegister( ReconcileTotal, ReconcileErrors, + TerminalReconcileErrors, ReconcileTime, WorkerCount, ActiveWorkers, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go deleted file mode 100644 index 7057f3dbe4..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package objectutil - -import ( - "errors" - "fmt" - - apimeta "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// FilterWithLabels returns a copy of the items in objs matching labelSel. 
-func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { - outItems := make([]runtime.Object, 0, len(objs)) - for _, obj := range objs { - meta, err := apimeta.Accessor(obj) - if err != nil { - return nil, err - } - if labelSel != nil { - lbls := labels.Set(meta.GetLabels()) - if !labelSel.Matches(lbls) { - continue - } - } - outItems = append(outItems, obj.DeepCopyObject()) - } - return outItems, nil -} - -// IsAPINamespaced returns true if the object is namespace scoped. -// For unstructured objects the gvk is found from the object itself. -func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return false, err - } - - return IsAPINamespacedWithGVK(gvk, scheme, restmapper) -} - -// IsAPINamespacedWithGVK returns true if the object having the provided -// GVK is namespace scoped. -func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) - if err != nil { - return false, fmt.Errorf("failed to get restmapping: %w", err) - } - - scope := restmapping.Scope.Name() - - if scope == "" { - return false, errors.New("scope cannot be identified, empty scope returned") - } - - if scope != apimeta.RESTScopeNameRoot { - return true, nil - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go index 9d8b2f0740..21f0146ba3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go @@ -19,6 +19,7 @@ package recorder import ( "context" "fmt" + "net/http" "sync" "github.com/go-logr/logr" @@ -110,8 +111,12 @@ func (p *Provider) getBroadcaster() record.EventBroadcaster { } // NewProvider create a new Provider instance. -func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { - corev1Client, err := corev1client.NewForConfig(config) +func NewProvider(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { + if httpClient == nil { + panic("httpClient must not be nil") + } + + corev1Client, err := corev1client.NewForConfigAndClient(config, httpClient) if err != nil { return nil, fmt.Errorf("failed to init client: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go similarity index 67% rename from vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go index f0cfe212ed..ae8404a1fa 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go @@ -17,6 +17,7 @@ limitations under the License. 
package internal import ( + "context" "fmt" "k8s.io/client-go/tools/cache" @@ -31,17 +32,39 @@ import ( var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") -var _ cache.ResourceEventHandler = EventHandler{} +// NewEventHandler creates a new EventHandler. +func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.EventHandler, predicates []predicate.Predicate) *EventHandler { + return &EventHandler{ + ctx: ctx, + handler: handler, + queue: queue, + predicates: predicates, + } +} // EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. type EventHandler struct { - EventHandler handler.EventHandler - Queue workqueue.RateLimitingInterface - Predicates []predicate.Predicate + // ctx stores the context that created the event handler + // that is used to propagate cancellation signals to each handler function. + ctx context.Context + + handler handler.EventHandler + queue workqueue.RateLimitingInterface + predicates []predicate.Predicate +} + +// HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs +// TODO: switch to ResourceEventHandlerDetailedFuncs with client-go 1.27 +func (e *EventHandler) HandlerFuncs() cache.ResourceEventHandlerFuncs { + return cache.ResourceEventHandlerFuncs{ + AddFunc: e.OnAdd, + UpdateFunc: e.OnUpdate, + DeleteFunc: e.OnDelete, + } } // OnAdd creates CreateEvent and calls Create on EventHandler. -func (e EventHandler) OnAdd(obj interface{}) { +func (e *EventHandler) OnAdd(obj interface{}) { c := event.CreateEvent{} // Pull Object out of the object @@ -53,18 +76,20 @@ func (e EventHandler) OnAdd(obj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Create(c) { return } } // Invoke create handler - e.EventHandler.Create(c, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Create(ctx, c, e.queue) } // OnUpdate creates UpdateEvent and calls Update on EventHandler. -func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { +func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) { u := event.UpdateEvent{} if o, ok := oldObj.(client.Object); ok { @@ -84,18 +109,20 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Update(u) { return } } // Invoke update handler - e.EventHandler.Update(u, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Update(ctx, u, e.queue) } // OnDelete creates DeleteEvent and calls Delete on EventHandler. -func (e EventHandler) OnDelete(obj interface{}) { +func (e *EventHandler) OnDelete(obj interface{}) { d := event.DeleteEvent{} // Deal with tombstone events by pulling the object out. 
Tombstone events wrap the object in a @@ -114,6 +141,9 @@ func (e EventHandler) OnDelete(obj interface{}) { return } + // Set DeleteStateUnknown to true + d.DeleteStateUnknown = true + // Set obj to the tombstone obj obj = tombstone.Obj } @@ -127,12 +157,14 @@ func (e EventHandler) OnDelete(obj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Delete(d) { return } } // Invoke delete handler - e.EventHandler.Delete(d, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Delete(ctx, d, e.queue) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go new file mode 100644 index 0000000000..b3a8227125 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go @@ -0,0 +1,117 @@ +package internal + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). +type Kind struct { + // Type is the type of object to watch. e.g. &v1.Pod{} + Type client.Object + + // Cache used to watch APIs + Cache cache.Cache + + // started may contain an error if one was encountered during startup. If its closed and does not + // contain an error, startup and syncing finished. + started chan error + startCancel func() +} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. +func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + if ks.Type == nil { + return fmt.Errorf("must create Kind with a non-nil object") + } + if ks.Cache == nil { + return fmt.Errorf("must create Kind with a non-nil cache") + } + + // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not + // sync that informer (most commonly due to RBAC issues). + ctx, ks.startCancel = context.WithCancel(ctx) + ks.started = make(chan error) + go func() { + var ( + i cache.Informer + lastErr error + ) + + // Tries to get an informer until it returns true, + // an error or the specified context is cancelled or expired. + if err := wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) { + // Lookup the Informer from the Cache and add an EventHandler which populates the Queue + i, lastErr = ks.Cache.GetInformer(ctx, ks.Type) + if lastErr != nil { + kindMatchErr := &meta.NoKindMatchError{} + switch { + case errors.As(lastErr, &kindMatchErr): + log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", + "kind", kindMatchErr.GroupKind) + case runtime.IsNotRegisteredError(lastErr): + log.Error(lastErr, "kind must be registered to the Scheme") + default: + log.Error(lastErr, "failed to get informer from cache") + } + return false, nil // Retry. 
+ } + return true, nil + }); err != nil { + if lastErr != nil { + ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) + return + } + ks.started <- err + return + } + + _, err := i.AddEventHandler(NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) + if err != nil { + ks.started <- err + return + } + if !ks.Cache.WaitForCacheSync(ctx) { + // Would be great to return something more informative here + ks.started <- errors.New("cache did not sync") + } + close(ks.started) + }() + + return nil +} + +func (ks *Kind) String() string { + if ks.Type != nil { + return fmt.Sprintf("kind source: %T", ks.Type) + } + return "kind source: unknown type" +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +func (ks *Kind) WaitForSync(ctx context.Context) error { + select { + case err := <-ks.started: + return err + case <-ctx.Done(): + ks.startCancel() + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return fmt.Errorf("timed out waiting for cache to be synced for Kind %T", ks.Type) + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index c82447d919..c27b4305f8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -25,7 +25,7 @@ import ( // loggerPromise knows how to populate a concrete logr.Logger // with options, given an actual base logger later on down the line. type loggerPromise struct { - logger *DelegatingLogSink + logger *delegatingLogSink childPromises []*loggerPromise promisesLock sync.Mutex @@ -33,7 +33,7 @@ type loggerPromise struct { tags []interface{} } -func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { +func (p *loggerPromise) WithName(l *delegatingLogSink, name string) *loggerPromise { res := &loggerPromise{ logger: l, name: &name, @@ -47,7 +47,7 @@ func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromi } // WithValues provides a new Logger with the tags appended. -func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { +func (p *loggerPromise) WithValues(l *delegatingLogSink, tags ...interface{}) *loggerPromise { res := &loggerPromise{ logger: l, tags: tags, @@ -84,12 +84,12 @@ func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { } } -// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// delegatingLogSink is a logsink that delegates to another logr.LogSink. // If the underlying promise is not nil, it registers calls to sub-loggers with // the logging factory to be populated later, and returns a new delegating // logger. It expects to have *some* logr.Logger set at all times (generally // a no-op logger before the promises are fulfilled). -type DelegatingLogSink struct { +type delegatingLogSink struct { lock sync.RWMutex logger logr.LogSink promise *loggerPromise @@ -97,7 +97,8 @@ type DelegatingLogSink struct { } // Init implements logr.LogSink. -func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { +func (l *delegatingLogSink) Init(info logr.RuntimeInfo) { + eventuallyFulfillRoot() l.lock.Lock() defer l.lock.Unlock() l.info = info @@ -106,7 +107,8 @@ func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info // logs. 
-func (l *DelegatingLogSink) Enabled(level int) bool { +func (l *delegatingLogSink) Enabled(level int) bool { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() return l.logger.Enabled(level) @@ -118,7 +120,8 @@ func (l *DelegatingLogSink) Enabled(level int) bool { // the log line. The key/value pairs can then be used to add additional // variable information. The key/value pairs should alternate string // keys and arbitrary values. -func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Info(level, msg, keysAndValues...) @@ -132,14 +135,16 @@ func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interfa // The msg field should be used to add context to any underlying error, // while the err field should be used to attach the actual error that // triggered this log line, if present. -func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Error(err, msg, keysAndValues...) } // WithName provides a new Logger with the name appended. -func (l *DelegatingLogSink) WithName(name string) logr.LogSink { +func (l *delegatingLogSink) WithName(name string) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() @@ -151,7 +156,7 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink { return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithName(res, name) res.promise = promise @@ -159,7 +164,8 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink { } // WithValues provides a new Logger with the tags appended. -func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { +func (l *delegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() @@ -171,7 +177,7 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithValues(res, tags...) res.promise = promise @@ -181,16 +187,16 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { // Fulfill switches the logger over to use the actual logger // provided, instead of the temporary initial one, if this method // has not been previously called. -func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { +func (l *delegatingLogSink) Fulfill(actual logr.LogSink) { if l.promise != nil { l.promise.Fulfill(actual) } } -// NewDelegatingLogSink constructs a new DelegatingLogSink which uses +// newDelegatingLogSink constructs a new DelegatingLogSink which uses // the given logger before its promise is fulfilled. 
-func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { - l := &DelegatingLogSink{ +func newDelegatingLogSink(initial logr.LogSink) *delegatingLogSink { + l := &delegatingLogSink{ logger: initial, promise: &loggerPromise{promisesLock: sync.Mutex{}}, } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go index 082dce3adb..a79151c69e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -35,7 +35,10 @@ package log import ( "context" - "sync" + "fmt" + "os" + "runtime/debug" + "sync/atomic" "time" "github.com/go-logr/logr" @@ -43,35 +46,24 @@ import ( // SetLogger sets a concrete logging implementation for all deferred Loggers. func SetLogger(l logr.Logger) { - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - - loggerWasSet = true - dlog.Fulfill(l.GetSink()) + logFullfilled.Store(true) + rootLog.Fulfill(l.GetSink()) } -// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries -// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory -// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. -// -// We need to keep the DelegatingLogSink because we have various inits() that get a logger from -// here. They will always get executed before any code that imports controller-runtime -// has a chance to run and hence to set an actual logger. -func init() { - // Init is blocking, so start a new goroutine - go func() { - time.Sleep(30 * time.Second) - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - if !loggerWasSet { - dlog.Fulfill(NullLogSink{}) +func eventuallyFulfillRoot() { + if logFullfilled.Load() { + return + } + if time.Since(rootLogCreated).Seconds() >= 30 { + if logFullfilled.CompareAndSwap(false, true) { + fmt.Fprintf(os.Stderr, "[controller-runtime] log.SetLogger(...) was never called, logs will not be displayed:\n%s", debug.Stack()) + SetLogger(logr.New(NullLogSink{})) } - }() + } } var ( - loggerWasSetLock sync.Mutex - loggerWasSet bool + logFullfilled atomic.Bool ) // Log is the base logger used by kubebuilder. It delegates @@ -80,8 +72,10 @@ var ( // the first 30 seconds of a binaries lifetime, it will get // set to a NullLogSink. var ( - dlog = NewDelegatingLogSink(NullLogSink{}) - Log = logr.New(dlog) + rootLog, rootLogCreated = func() (*delegatingLogSink, time.Time) { + return newDelegatingLogSink(NullLogSink{}), time.Now() + }() + Log = logr.New(rootLog) ) // FromContext returns a logger with predefined values from a context.Context. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go index 9824470240..3b4ebfdaa0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go @@ -24,7 +24,6 @@ import ( "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" ) // KubeAwareEncoder is a Kubernetes-aware Zap Encoder. @@ -41,21 +40,6 @@ type KubeAwareEncoder struct { Verbose bool } -// namespacedNameWrapper is a zapcore.ObjectMarshaler for Kubernetes NamespacedName. 
-type namespacedNameWrapper struct { - types.NamespacedName -} - -func (w namespacedNameWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error { - if w.Namespace != "" { - enc.AddString("namespace", w.Namespace) - } - - enc.AddString("name", w.Name) - - return nil -} - // kubeObjectWrapper is a zapcore.ObjectMarshaler for Kubernetes objects. type kubeObjectWrapper struct { obj runtime.Object @@ -119,12 +103,6 @@ func (k *KubeAwareEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Fie Key: field.Key, Interface: kubeObjectWrapper{obj: val}, } - case types.NamespacedName: - fields[i] = zapcore.Field{ - Type: zapcore.ObjectMarshalerType, - Key: field.Key, - Interface: namespacedNameWrapper{NamespacedName: val}, - } } } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 5ccff8b782..f298229e57 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -18,11 +18,11 @@ package manager import ( "context" - "crypto/tls" "errors" "fmt" "net" "net/http" + "net/http/pprof" "sync" "sync/atomic" "time" @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" @@ -41,12 +40,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" - "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/metrics" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -107,8 +105,11 @@ type controllerManager struct { // Healthz probe handler healthzHandler *healthz.Handler - // controllerOptions are the global controller options. - controllerOptions v1alpha1.ControllerConfigurationSpec + // pprofListener is used to serve pprof + pprofListener net.Listener + + // controllerConfig are the global controller options. + controllerConfig config.Controller // Logger is the logger that should be used by this manager. // If none is set, it defaults to log.Log global logger. @@ -128,18 +129,7 @@ type controllerManager struct { // election was configured. elected chan struct{} - // port is the port that the webhook server serves at. - port int - // host is the hostname that the webhook server binds to. - host string - // CertDir is the directory that contains the server key and certificate. - // if not set, webhook server would look up the server key and certificate in - // {TempDir}/k8s-webhook-server/serving-certs - certDir string - // tlsOpts is used to allow configuring the TLS config used for the webhook server. - tlsOpts []func(*tls.Config) - - webhookServer *webhook.Server + webhookServer webhook.Server // webhookServerOnce will be called in GetWebhookServer() to optionally initialize // webhookServer if unset, and Add() it to controllerManager. 
webhookServerOnce sync.Once @@ -191,31 +181,9 @@ func (cm *controllerManager) Add(r Runnable) error { } func (cm *controllerManager) add(r Runnable) error { - // Set dependencies on the object - if err := cm.SetFields(r); err != nil { - return err - } return cm.runnables.Add(r) } -// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. -func (cm *controllerManager) SetFields(i interface{}) error { - if err := cm.cluster.SetFields(i); err != nil { - return err - } - if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { - return err - } - if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil { - return err - } - if _, err := inject.LoggerInto(cm.logger, i); err != nil { - return err - } - - return nil -} - // AddMetricsExtraHandler adds extra handler served on path to the http server that serves metrics. func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Handler) error { cm.Lock() @@ -272,6 +240,10 @@ func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) return nil } +func (cm *controllerManager) GetHTTPClient() *http.Client { + return cm.cluster.GetHTTPClient() +} + func (cm *controllerManager) GetConfig() *rest.Config { return cm.cluster.GetConfig() } @@ -304,15 +276,10 @@ func (cm *controllerManager) GetAPIReader() client.Reader { return cm.cluster.GetAPIReader() } -func (cm *controllerManager) GetWebhookServer() *webhook.Server { +func (cm *controllerManager) GetWebhookServer() webhook.Server { cm.webhookServerOnce.Do(func() { if cm.webhookServer == nil { - cm.webhookServer = &webhook.Server{ - Port: cm.port, - Host: cm.host, - CertDir: cm.certDir, - TLSOpts: cm.tlsOpts, - } + panic("webhook should not be nil") } if err := cm.Add(cm.webhookServer); err != nil { panic(fmt.Sprintf("unable to add webhook server to the controller manager: %s", err)) @@ -325,23 +292,29 @@ func (cm *controllerManager) GetLogger() logr.Logger { return cm.logger } -func (cm *controllerManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec { - return cm.controllerOptions +func (cm *controllerManager) GetControllerOptions() config.Controller { + return cm.controllerConfig } -func (cm *controllerManager) serveMetrics() { +func (cm *controllerManager) addMetricsServer() error { + mux := http.NewServeMux() + srv := httpserver.New(mux) + handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, }) // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics - mux := http.NewServeMux() mux.Handle(defaultMetricsEndpoint, handler) for path, extraHandler := range cm.metricsExtraHandlers { mux.Handle(path, extraHandler) } - server := httpserver.New(mux) - go cm.httpServe("metrics", cm.logger.WithValues("path", defaultMetricsEndpoint), server, cm.metricsListener) + return cm.add(&server{ + Kind: "metrics", + Log: cm.logger.WithValues("path", defaultMetricsEndpoint), + Server: srv, + Listener: cm.metricsListener, + }) } func (cm *controllerManager) serveHealthProbes() { @@ -362,6 +335,24 @@ func (cm *controllerManager) serveHealthProbes() { go cm.httpServe("health probe", cm.logger, server, cm.healthProbeListener) } +func (cm *controllerManager) addPprofServer() error { + mux := http.NewServeMux() + srv := httpserver.New(mux) + + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + 
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + + return cm.add(&server{ + Kind: "pprof", + Log: cm.logger, + Server: srv, + Listener: cm.pprofListener, + }) +} + func (cm *controllerManager) httpServe(kind string, log logr.Logger, server *http.Server, ln net.Listener) { log = log.WithValues("kind", kind, "addr", ln.Addr()) @@ -451,7 +442,9 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // (If we don't serve metrics for non-leaders, prometheus will still scrape // the pod but will get a connection refused). if cm.metricsListener != nil { - cm.serveMetrics() + if err := cm.addMetricsServer(); err != nil { + return fmt.Errorf("failed to add metrics server: %w", err) + } } // Serve health probes. @@ -459,6 +452,13 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { cm.serveHealthProbes() } + // Add pprof server + if cm.pprofListener != nil { + if err := cm.addPprofServer(); err != nil { + return fmt.Errorf("failed to add pprof server: %w", err) + } + } + // First start any webhook servers, which includes conversion, validation, and defaulting // webhooks that are registered. // @@ -466,22 +466,22 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks // to never start because no cache can be populated. if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start webhooks: %w", err) } } // Start and wait for caches. if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start caches: %w", err) } } // Start the non-leaderelection Runnables after the cache has synced. if err := cm.runnables.Others.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start other runnables: %w", err) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 2facb1c915..72a4a7801a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -19,6 +19,7 @@ package manager import ( "context" "crypto/tls" + "errors" "fmt" "net" "net/http" @@ -33,6 +34,7 @@ import ( "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" @@ -44,7 +46,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -55,8 +56,7 @@ type Manager interface { cluster.Cluster // Add will set requested dependencies on the component, and cause the component to be - // started when Start is called. Add will inject any dependencies for which the argument - // implements the inject interface - e.g. inject.Client. + // started when Start is called. 
// Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either // non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled). Add(Runnable) error @@ -88,13 +88,13 @@ type Manager interface { Start(ctx context.Context) error // GetWebhookServer returns a webhook.Server - GetWebhookServer() *webhook.Server + GetWebhookServer() webhook.Server // GetLogger returns this manager's logger. GetLogger() logr.Logger // GetControllerOptions returns controller global configuration options. - GetControllerOptions() v1alpha1.ControllerConfigurationSpec + GetControllerOptions() config.Controller } // Options are the arguments for creating a new Manager. @@ -102,10 +102,44 @@ type Options struct { // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources. // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better // to pass your own scheme in. See the documentation in pkg/scheme for more information. + // + // If set, the Scheme will be used to create the default Client and Cache. Scheme *runtime.Scheme - // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs - MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs. + // + // If set, the RESTMapper returned by this function is used to create the RESTMapper + // used by the Client and Cache. + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewCache if you know what you are doing. + NewCache cache.NewCacheFunc + + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + + // NewClient is the func that creates the client to be used by the manager. + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. A lower period will correct entropy more quickly, but reduce @@ -132,6 +166,8 @@ type Options struct { // is "done" with an object, and would otherwise not requeue it, i.e., we // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, // instead of `reconcile.Result{}`. + // + // Deprecated: Use Cache.SyncPeriod instead. SyncPeriod *time.Duration // Logger is the logger that should be used by this manager. @@ -217,6 +253,8 @@ type Options struct { // Note: If a namespace is specified, controllers can still Watch for a // cluster-scoped resource (e.g Node). 
For namespaced resources, the cache // will only hold objects from the desired namespace. + // + // Deprecated: Use Cache.Namespaces instead. Namespace string // MetricsBindAddress is the TCP address that the controller should bind to @@ -235,11 +273,22 @@ type Options struct { // Liveness probe endpoint name, defaults to "healthz" LivenessEndpointName string + // PprofBindAddress is the TCP address that the controller should bind to + // for serving pprof. + // It can be set to "" or "0" to disable the pprof serving. + // Since pprof may contain sensitive information, make sure to protect it + // before exposing it to public. + PprofBindAddress string + // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. Port int // Host is the hostname that the webhook server binds to. // It is used to set webhook.Server.Host if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. Host string // CertDir is the directory that contains the server key and certificate. @@ -247,26 +296,19 @@ type Options struct { // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate // must be named tls.key and tls.crt, respectively. // It is used to set webhook.Server.CertDir if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. CertDir string // TLSOpts is used to allow configuring the TLS config used for the webhook server. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. TLSOpts []func(*tls.Config) // WebhookServer is an externally configured webhook.Server. By default, // a Manager will create a default server using Port, Host, and CertDir; // if this is set, the Manager will use this server instead. - WebhookServer *webhook.Server - - // Functions to allow for a user to customize values that will be injected. - - // NewCache is the function that will create the cache to be used - // by the manager. If not set this will use the default new cache function. - NewCache cache.NewCacheFunc - - // NewClient is the func that creates the client to be used by the manager. - // If not set this will create the default DelegatingClient that will - // use the cache for reads and the client for writes. - NewClient cluster.NewClientFunc + WebhookServer webhook.Server // BaseContext is the function that provides Context values to Runnables // managed by the Manager. If a BaseContext function isn't provided, Runnables @@ -275,10 +317,14 @@ type Options struct { // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it // for the given objects. + // + // Deprecated: Use Client.Cache.DisableCacheFor instead. ClientDisableCacheFor []client.Object // DryRunClient specifies whether the client should be configured to enforce // dryRun mode. + // + // Deprecated: Use Client.DryRun instead. DryRunClient bool // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API @@ -297,7 +343,7 @@ type Options struct { // Controller contains global configuration options for controllers // registered within this manager. 
// +optional - Controller v1alpha1.ControllerConfigurationSpec + Controller config.Controller // makeBroadcaster allows deferring the creation of the broadcaster to // avoid leaking goroutines if we never call Start on this manager. It also @@ -306,10 +352,11 @@ type Options struct { makeBroadcaster intrec.EventBroadcasterProducer // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) newMetricsListener func(addr string) (net.Listener, error) newHealthProbeListener func(addr string) (net.Listener, error) + newPprofListener func(addr string) (net.Listener, error) } // BaseContextFunc is a function used to provide a base Context to Runnables @@ -345,6 +392,9 @@ type LeaderElectionRunnable interface { // New returns a new Manager for creating Controllers. func New(config *rest.Config, options Options) (Manager, error) { + if config == nil { + return nil, errors.New("must specify Config") + } // Set default values for options fields options = setOptionsDefaults(options) @@ -353,21 +403,28 @@ func New(config *rest.Config, options Options) (Manager, error) { clusterOptions.MapperProvider = options.MapperProvider clusterOptions.Logger = options.Logger clusterOptions.SyncPeriod = options.SyncPeriod - clusterOptions.Namespace = options.Namespace clusterOptions.NewCache = options.NewCache clusterOptions.NewClient = options.NewClient - clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor - clusterOptions.DryRunClient = options.DryRunClient - clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck + clusterOptions.Cache = options.Cache + clusterOptions.Client = options.Client + clusterOptions.Namespace = options.Namespace //nolint:staticcheck + clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor //nolint:staticcheck + clusterOptions.DryRunClient = options.DryRunClient //nolint:staticcheck + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck }) if err != nil { return nil, err } + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. 
- recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + recorderProvider, err := options.newRecorderProvider(config, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -381,7 +438,7 @@ func New(config *rest.Config, options Options) (Manager, error) { leaderRecorderProvider = recorderProvider } else { leaderConfig = rest.CopyConfig(options.LeaderElectionConfig) - leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -419,6 +476,13 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } + // Create pprof listener. This will throw an error if the bind + // address is invalid or already in use. + pprofListener, err := options.newPprofListener(options.PprofBindAddress) + if err != nil { + return nil, fmt.Errorf("failed to new pprof listener: %w", err) + } + errChan := make(chan error) runnables := newRunnables(options.BaseContext, errChan) @@ -431,13 +495,9 @@ func New(config *rest.Config, options Options) (Manager, error) { resourceLock: resourceLock, metricsListener: metricsListener, metricsExtraHandlers: metricsExtraHandlers, - controllerOptions: options.Controller, + controllerConfig: options.Controller, logger: options.Logger, elected: make(chan struct{}), - port: options.Port, - host: options.Host, - certDir: options.CertDir, - tlsOpts: options.TLSOpts, webhookServer: options.WebhookServer, leaderElectionID: options.LeaderElectionID, leaseDuration: *options.LeaseDuration, @@ -446,6 +506,7 @@ func New(config *rest.Config, options Options) (Manager, error) { healthProbeListener: healthProbeListener, readinessEndpointName: options.ReadinessEndpointName, livenessEndpointName: options.LivenessEndpointName, + pprofListener: pprofListener, gracefulShutdownTimeout: *options.GracefulShutdownTimeout, internalProceduresStop: make(chan struct{}), leaderElectionStopped: make(chan struct{}), @@ -456,14 +517,14 @@ func New(config *rest.Config, options Options) (Manager, error) { // AndFrom will use a supplied type and convert to Options // any options already set on Options will be ignored, this is used to allow // cli flags to override anything specified in the config file. +// +// Deprecated: This function has been deprecated and will be removed in a future release, +// The Component Configuration package has been unmaintained for over a year and is no longer +// actively developed. Users should migrate to their own configuration format +// and configure Manager.Options directly. +// See https://github.com/kubernetes-sigs/controller-runtime/issues/895 +// for more information, feedback, and comments. 
func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) { - if inj, wantsScheme := loader.(inject.Scheme); wantsScheme { - err := inj.InjectScheme(o.Scheme) - if err != nil { - return o, err - } - } - newObj, err := loader.Complete() if err != nil { return o, err @@ -498,18 +559,23 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, if o.Port == 0 && newObj.Webhook.Port != nil { o.Port = *newObj.Webhook.Port } - if o.Host == "" && newObj.Webhook.Host != "" { o.Host = newObj.Webhook.Host } - if o.CertDir == "" && newObj.Webhook.CertDir != "" { o.CertDir = newObj.Webhook.CertDir } + if o.WebhookServer == nil { + o.WebhookServer = webhook.NewServer(webhook.Options{ + Port: o.Port, + Host: o.Host, + CertDir: o.CertDir, + }) + } if newObj.Controller != nil { - if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil { - o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout + if o.Controller.CacheSyncTimeout == 0 && newObj.Controller.CacheSyncTimeout != nil { + o.Controller.CacheSyncTimeout = *newObj.Controller.CacheSyncTimeout } if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 { @@ -521,6 +587,13 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, } // AndFromOrDie will use options.AndFrom() and will panic if there are errors. +// +// Deprecated: This function has been deprecated and will be removed in a future release, +// The Component Configuration package has been unmaintained for over a year and is no longer +// actively developed. Users should migrate to their own configuration format +// and configure Manager.Options directly. +// See https://github.com/kubernetes-sigs/controller-runtime/issues/895 +// for more information, feedback, and comments. func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options { o, err := o.AndFrom(loader) if err != nil { @@ -579,6 +652,19 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) { return ln, nil } +// defaultPprofListener creates the default pprof listener bound to the given address. +func defaultPprofListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %w", addr, err) + } + return ln, nil +} + // defaultBaseContext is used as the BaseContext value in Options if one // has not already been set. 
func defaultBaseContext() context.Context { @@ -639,6 +725,10 @@ func setOptionsDefaults(options Options) Options { options.newHealthProbeListener = defaultHealthProbeListener } + if options.newPprofListener == nil { + options.newPprofListener = defaultPprofListener + } + if options.GracefulShutdownTimeout == nil { gracefulShutdownTimeout := defaultGracefulShutdownPeriod options.GracefulShutdownTimeout = &gracefulShutdownTimeout @@ -652,5 +742,14 @@ func setOptionsDefaults(options Options) Options { options.BaseContext = defaultBaseContext } + if options.WebhookServer == nil { + options.WebhookServer = webhook.NewServer(webhook.Options{ + Host: options.Host, + Port: options.Port, + CertDir: options.CertDir, + TLSOpts: options.TLSOpts, + }) + } + return options } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go index f7b91a209f..549741e6e5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go @@ -56,7 +56,7 @@ func (r *runnables) Add(fn Runnable) error { return r.Caches.Add(fn, func(ctx context.Context) bool { return runnable.GetCache().WaitForCacheSync(ctx) }) - case *webhook.Server: + case webhook.Server: return r.Webhooks.Add(fn, nil) case LeaderElectionRunnable: if !runnable.NeedLeaderElection() { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go new file mode 100644 index 0000000000..b6509f48f2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager + +import ( + "context" + "errors" + "net" + "net/http" + + "github.com/go-logr/logr" +) + +// server is a general purpose HTTP server Runnable for a manager +// to serve some internal handlers such as health probes, metrics and profiling. 
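// Illustrative sketch (assumed downstream usage, not taken from this patch):
// with Options.AndFrom deprecated, callers set manager.Options fields directly.
// PprofBindAddress and WebhookServer are the fields wired up in the hunks above;
// the bind address, port, and cert directory values below are assumptions.
package example

import (
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func newManager(cfg *rest.Config) (manager.Manager, error) {
	return manager.New(cfg, manager.Options{
		// Serves the pprof handlers; empty (or "0") disables the listener,
		// mirroring defaultPprofListener above.
		PprofBindAddress: ":8082",
		// The webhook server is now passed in as a constructed value instead of
		// the Host/Port/CertDir/TLSOpts fields removed from controllerManager above.
		WebhookServer: webhook.NewServer(webhook.Options{
			Port:    9443,
			CertDir: "/tmp/k8s-webhook-server/serving-certs",
		}),
	})
}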
+type server struct { + Kind string + Log logr.Logger + Server *http.Server + Listener net.Listener +} + +func (s *server) Start(ctx context.Context) error { + log := s.Log.WithValues("kind", s.Kind, "addr", s.Listener.Addr()) + + serverShutdown := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("shutting down server") + if err := s.Server.Shutdown(context.Background()); err != nil { + log.Error(err, "error shutting down server") + } + close(serverShutdown) + }() + + log.Info("starting server") + if err := s.Server.Serve(s.Listener); err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + + <-serverShutdown + return nil +} + +func (s *server) NeedLeaderElection() bool { + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go index a8b43ea0a4..ff28998c44 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -18,8 +18,6 @@ package metrics import ( "context" - "net/url" - "time" "github.com/prometheus/client_golang/prometheus" clientmetrics "k8s.io/client-go/tools/metrics" @@ -29,70 +27,9 @@ import ( // that client-go registers metrics. We copy the names and formats // from Kubernetes so that we match the core controllers. -// Metrics subsystem and all of the keys used by the rest client. -const ( - RestClientSubsystem = "rest_client" - LatencyKey = "request_latency_seconds" - ResultKey = "requests_total" -) - var ( // client metrics. - // RequestLatency reports the request latency in seconds per verb/URL. - // Deprecated: This metric is deprecated for removal in a future release: using the URL as a - // dimension results in cardinality explosion for some consumers. It was deprecated upstream - // in k8s v1.14 and hidden in v1.17 via https://github.com/kubernetes/kubernetes/pull/83836. - // It is not registered by default. To register: - // import ( - // clientmetrics "k8s.io/client-go/tools/metrics" - // clmetrics "sigs.k8s.io/controller-runtime/metrics" - // ) - // - // func init() { - // clmetrics.Registry.MustRegister(clmetrics.RequestLatency) - // clientmetrics.Register(clientmetrics.RegisterOpts{ - // RequestLatency: clmetrics.LatencyAdapter - // }) - // } - RequestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: RestClientSubsystem, - Name: LatencyKey, - Help: "Request latency in seconds. Broken down by verb and URL.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), - }, []string{"verb", "url"}) - - // requestLatency is a Prometheus Histogram metric type partitioned by - // "verb", and "host" labels. It is used for the rest client latency metrics. - requestLatency = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_duration_seconds", - Help: "Request latency in seconds. Broken down by verb, and host.", - Buckets: []float64{0.005, 0.025, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 15.0, 30.0, 60.0}, - }, - []string{"verb", "host"}, - ) - - requestSize = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_size_bytes", - Help: "Request size in bytes. 
Broken down by verb and host.", - // 64 bytes to 16MB - Buckets: []float64{64, 256, 512, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216}, - }, - []string{"verb", "host"}, - ) - - responseSize = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_response_size_bytes", - Help: "Response size in bytes. Broken down by verb and host.", - // 64 bytes to 16MB - Buckets: []float64{64, 256, 512, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216}, - }, - []string{"verb", "host"}, - ) - requestResult = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "rest_client_requests_total", @@ -109,17 +46,11 @@ func init() { // registerClientMetrics sets up the client latency metrics from client-go. func registerClientMetrics() { // register the metrics with our registry - Registry.MustRegister(requestLatency) - Registry.MustRegister(requestSize) - Registry.MustRegister(responseSize) Registry.MustRegister(requestResult) // register the metrics with client-go clientmetrics.Register(clientmetrics.RegisterOpts{ - RequestLatency: &LatencyAdapter{metric: requestLatency}, - RequestSize: &sizeAdapter{metric: requestSize}, - ResponseSize: &sizeAdapter{metric: responseSize}, - RequestResult: &resultAdapter{metric: requestResult}, + RequestResult: &resultAdapter{metric: requestResult}, }) } @@ -131,24 +62,6 @@ func registerClientMetrics() { // copied (more-or-less directly) from k8s.io/kubernetes setup code // (which isn't anywhere in an easily-importable place). -// LatencyAdapter implements LatencyMetric. -type LatencyAdapter struct { - metric *prometheus.HistogramVec -} - -// Observe increments the request latency metric for the given verb/URL. -func (l *LatencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) { - l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) -} - -type sizeAdapter struct { - metric *prometheus.HistogramVec -} - -func (s *sizeAdapter) Observe(ctx context.Context, verb string, host string, size float64) { - s.metric.WithLabelValues(verb, host).Observe(size) -} - type resultAdapter struct { metric *prometheus.CounterVec } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index 8b0f3634e4..314635875e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -24,7 +24,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") @@ -242,15 +241,6 @@ type and struct { predicates []Predicate } -func (a and) InjectFunc(f inject.Func) error { - for _, p := range a.predicates { - if err := f(p); err != nil { - return err - } - } - return nil -} - func (a and) Create(e event.CreateEvent) bool { for _, p := range a.predicates { if !p.Create(e) { @@ -296,15 +286,6 @@ type or struct { predicates []Predicate } -func (o or) InjectFunc(f inject.Func) error { - for _, p := range o.predicates { - if err := f(p); err != nil { - return err - } - } - return nil -} - func (o or) Create(e event.CreateEvent) bool { for _, p := range o.predicates { if p.Create(e) { @@ -350,10 +331,6 @@ type not struct { predicate Predicate } -func (n not) InjectFunc(f inject.Func) error { - return f(n.predicate) -} - func (n 
not) Create(e event.CreateEvent) bool { return !n.predicate.Create(e) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go index 8285e2ca9b..4c8f8357a5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -18,6 +18,7 @@ package reconcile import ( "context" + "errors" "time" "k8s.io/apimachinery/pkg/types" @@ -100,3 +101,30 @@ var _ Reconciler = Func(nil) // Reconcile implements Reconciler. func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } + +// TerminalError is an error that will not be retried but still be logged +// and recorded in metrics. +func TerminalError(wrapped error) error { + return &terminalError{err: wrapped} +} + +type terminalError struct { + err error +} + +// This function will return nil if te.err is nil. +func (te *terminalError) Unwrap() error { + return te.err +} + +func (te *terminalError) Error() string { + if te.err == nil { + return "nil terminal error" + } + return "terminal error: " + te.err.Error() +} + +func (te *terminalError) Is(target error) bool { + tp := &terminalError{} + return errors.As(target, &tp) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go deleted file mode 100644 index c8c56ba817..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package inject is used by a Manager to inject types into Sources, EventHandlers, Predicates, and Reconciles. -// Deprecated: Use manager.Options fields directly. This package will be removed in v0.10. -package inject - -import ( - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and -// Reconciles. -type Cache interface { - InjectCache(cache cache.Cache) error -} - -// CacheInto will set informers on i and return the result if it implements Cache. Returns -// false if i does not implement Cache. -func CacheInto(c cache.Cache, i interface{}) (bool, error) { - if s, ok := i.(Cache); ok { - return true, s.InjectCache(c) - } - return false, nil -} - -// APIReader is used by the Manager to inject the APIReader into necessary types. -type APIReader interface { - InjectAPIReader(client.Reader) error -} - -// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. -// Returns false if i does not implement APIReader. 
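// Illustrative sketch (assumed downstream usage, not taken from this patch):
// reconcile.TerminalError, added above, wraps an error so the request is not
// requeued while still being logged and recorded in metrics. widgetReconciler,
// reconcileWidget, and errInvalidSpec are hypothetical names.
package example

import (
	"context"
	"errors"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

var errInvalidSpec = errors.New("spec is invalid and cannot be retried")

type widgetReconciler struct{}

func (r *widgetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	if err := reconcileWidget(ctx, req); err != nil {
		if errors.Is(err, errInvalidSpec) {
			// Permanent failure: surface it without another requeue.
			return reconcile.Result{}, reconcile.TerminalError(err)
		}
		return reconcile.Result{}, err // transient failure: retried as usual
	}
	return reconcile.Result{}, nil
}

// reconcileWidget stands in for real reconciliation logic.
func reconcileWidget(ctx context.Context, req reconcile.Request) error { return nil }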
-func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { - if s, ok := i.(APIReader); ok { - return true, s.InjectAPIReader(reader) - } - return false, nil -} - -// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and -// Reconciles. -type Config interface { - InjectConfig(*rest.Config) error -} - -// ConfigInto will set config on i and return the result if it implements Config. Returns -// false if i does not implement Config. -func ConfigInto(config *rest.Config, i interface{}) (bool, error) { - if s, ok := i.(Config); ok { - return true, s.InjectConfig(config) - } - return false, nil -} - -// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and -// Reconciles. -type Client interface { - InjectClient(client.Client) error -} - -// ClientInto will set client on i and return the result if it implements Client. Returns -// false if i does not implement Client. -func ClientInto(client client.Client, i interface{}) (bool, error) { - if s, ok := i.(Client); ok { - return true, s.InjectClient(client) - } - return false, nil -} - -// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and -// Reconciles. -type Scheme interface { - InjectScheme(scheme *runtime.Scheme) error -} - -// SchemeInto will set scheme and return the result on i if it implements Scheme. Returns -// false if i does not implement Scheme. -func SchemeInto(scheme *runtime.Scheme, i interface{}) (bool, error) { - if is, ok := i.(Scheme); ok { - return true, is.InjectScheme(scheme) - } - return false, nil -} - -// Stoppable is used by the ControllerManager to inject stop channel into Sources, -// EventHandlers, Predicates, and Reconciles. -type Stoppable interface { - InjectStopChannel(<-chan struct{}) error -} - -// StopChannelInto will set stop channel on i and return the result if it implements Stoppable. -// Returns false if i does not implement Stoppable. -func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { - if s, ok := i.(Stoppable); ok { - return true, s.InjectStopChannel(stop) - } - return false, nil -} - -// Mapper is used to inject the rest mapper to components that may need it. -type Mapper interface { - InjectMapper(meta.RESTMapper) error -} - -// MapperInto will set the rest mapper on i and return the result if it implements Mapper. -// Returns false if i does not implement Mapper. -func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { - if m, ok := i.(Mapper); ok { - return true, m.InjectMapper(mapper) - } - return false, nil -} - -// Func injects dependencies into i. -type Func func(i interface{}) error - -// Injector is used by the ControllerManager to inject Func into Controllers. -type Injector interface { - InjectFunc(f Func) error -} - -// InjectorInto will set f and return the result on i if it implements Injector. Returns -// false if i does not implement Injector. -func InjectorInto(f Func, i interface{}) (bool, error) { - if ii, ok := i.(Injector); ok { - return true, ii.InjectFunc(f) - } - return false, nil -} - -// Logger is used to inject Loggers into components that need them -// and don't otherwise have opinions. -type Logger interface { - InjectLogger(l logr.Logger) error -} - -// LoggerInto will set the logger on the given object if it implements inject.Logger, -// returning true if a InjectLogger was called, and false otherwise. 
-func LoggerInto(l logr.Logger, i interface{}) (bool, error) { - if injectable, wantsLogger := i.(Logger); wantsLogger { - return true, injectable.InjectLogger(l) - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index 6b67563924..099c8d68fa 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -18,28 +18,19 @@ package source import ( "context" - "errors" "fmt" "sync" - "time" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - "sigs.k8s.io/controller-runtime/pkg/source/internal" + internal "sigs.k8s.io/controller-runtime/pkg/internal/source" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/predicate" ) -var log = logf.RuntimeLog.WithName("source") - const ( // defaultBufferSize is the default number of event notifications that can be buffered. defaultBufferSize = 1024 @@ -52,8 +43,7 @@ const ( // // * Use Channel for events originating outside the cluster (eh.g. GitHub Webhook callback, Polling external urls). // -// Users may build their own Source implementations. If their implementations implement any of the inject package -// interfaces, the dependencies will be injected by the Controller when Watch is called. +// Users may build their own Source implementations. type Source interface { // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. @@ -67,144 +57,9 @@ type SyncingSource interface { WaitForSync(ctx context.Context) error } -// NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used -// and not overwritten. It can be used to watch objects in a different cluster by passing the cache -// from that other cluster. -func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource { - return &kindWithCache{kind: Kind{Type: object, cache: cache}} -} - -type kindWithCache struct { - kind Kind -} - -func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - return ks.kind.Start(ctx, handler, queue, prct...) -} - -func (ks *kindWithCache) String() string { - return ks.kind.String() -} - -func (ks *kindWithCache) WaitForSync(ctx context.Context) error { - return ks.kind.WaitForSync(ctx) -} - -// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). -type Kind struct { - // Type is the type of object to watch. e.g. &v1.Pod{} - Type client.Object - - // cache used to watch APIs - cache cache.Cache - - // started may contain an error if one was encountered during startup. If its closed and does not - // contain an error, startup and syncing finished. - started chan error - startCancel func() -} - -var _ SyncingSource = &Kind{} - -// Start is internal and should be called only by the Controller to register an EventHandler with the Informer -// to enqueue reconcile.Requests. 
-func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - // Type should have been specified by the user. - if ks.Type == nil { - return fmt.Errorf("must specify Kind.Type") - } - - // cache should have been injected before Start was called - if ks.cache == nil { - return fmt.Errorf("must call CacheInto on Kind before calling Start") - } - - // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not - // sync that informer (most commonly due to RBAC issues). - ctx, ks.startCancel = context.WithCancel(ctx) - ks.started = make(chan error) - go func() { - var ( - i cache.Informer - lastErr error - ) - - // Tries to get an informer until it returns true, - // an error or the specified context is cancelled or expired. - if err := wait.PollImmediateUntilWithContext(ctx, 10*time.Second, func(ctx context.Context) (bool, error) { - // Lookup the Informer from the Cache and add an EventHandler which populates the Queue - i, lastErr = ks.cache.GetInformer(ctx, ks.Type) - if lastErr != nil { - kindMatchErr := &meta.NoKindMatchError{} - switch { - case errors.As(lastErr, &kindMatchErr): - log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", - "kind", kindMatchErr.GroupKind) - case runtime.IsNotRegisteredError(lastErr): - log.Error(lastErr, "kind must be registered to the Scheme") - default: - log.Error(lastErr, "failed to get informer from cache") - } - return false, nil // Retry. - } - return true, nil - }); err != nil { - if lastErr != nil { - ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) - return - } - ks.started <- err - return - } - - _, err := i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) - if err != nil { - ks.started <- err - return - } - if !ks.cache.WaitForCacheSync(ctx) { - // Would be great to return something more informative here - ks.started <- errors.New("cache did not sync") - } - close(ks.started) - }() - - return nil -} - -func (ks *Kind) String() string { - if ks.Type != nil { - return fmt.Sprintf("kind source: %T", ks.Type) - } - return "kind source: unknown type" -} - -// WaitForSync implements SyncingSource to allow controllers to wait with starting -// workers until the cache is synced. -func (ks *Kind) WaitForSync(ctx context.Context) error { - select { - case err := <-ks.started: - return err - case <-ctx.Done(): - ks.startCancel() - if errors.Is(ctx.Err(), context.Canceled) { - return nil - } - return errors.New("timed out waiting for cache to be synced") - } -} - -var _ inject.Cache = &Kind{} - -// InjectCache is internal should be called only by the Controller. InjectCache is used to inject -// the Cache dependency initialized by the ControllerManager. -func (ks *Kind) InjectCache(c cache.Cache) error { - if ks.cache == nil { - ks.cache = c - } - return nil +// Kind creates a KindSource with the given cache provider. 
+func Kind(cache cache.Cache, object client.Object) SyncingSource { + return &internal.Kind{Type: object, Cache: cache} } var _ Source = &Channel{} @@ -219,9 +74,6 @@ type Channel struct { // Source is the source channel to fetch GenericEvents Source <-chan event.GenericEvent - // stop is to end ongoing goroutine, and close the channels - stop <-chan struct{} - // dest is the destination channels of the added event handlers dest []chan event.GenericEvent @@ -237,18 +89,6 @@ func (cs *Channel) String() string { return fmt.Sprintf("channel source: %p", cs) } -var _ inject.Stoppable = &Channel{} - -// InjectStopChannel is internal should be called only by the Controller. -// It is used to inject the stop channel initialized by the ControllerManager. -func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error { - if cs.stop == nil { - cs.stop = stop - } - - return nil -} - // Start implements Source and should only be called by the Controller. func (cs *Channel) Start( ctx context.Context, @@ -260,11 +100,6 @@ func (cs *Channel) Start( return fmt.Errorf("must specify Channel.Source") } - // stop should have been injected before Start was called - if cs.stop == nil { - return fmt.Errorf("must call InjectStop on Channel before calling Start") - } - // use default value if DestBufferSize not specified if cs.DestBufferSize == 0 { cs.DestBufferSize = defaultBufferSize @@ -292,7 +127,11 @@ func (cs *Channel) Start( } if shouldHandle { - handler.Generic(evt, queue) + func() { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + handler.Generic(ctx, evt, queue) + }() } } }() @@ -359,7 +198,7 @@ func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, que return fmt.Errorf("must specify Informer.Informer") } - _, err := is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + _, err := is.Informer.AddEventHandler(internal.NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go index c7cb71b755..7e9c0a96bc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -19,7 +19,6 @@ package admission import ( "fmt" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/json" @@ -32,8 +31,11 @@ type Decoder struct { } // NewDecoder creates a Decoder given the runtime.Scheme. -func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { - return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +func NewDecoder(scheme *runtime.Scheme) *Decoder { + if scheme == nil { + panic("scheme should never be nil") + } + return &Decoder{codecs: serializer.NewCodecFactory(scheme)} } // Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. 
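// Illustrative sketch (assumed downstream usage, not taken from this patch):
// with the inject package removed, the cache is handed to source.Kind directly,
// and admission.NewDecoder returns the decoder without an error (panicking on a
// nil scheme). The Pod type is only an example object.
package example

import (
	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/source"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

func podSourceAndDecoder(mgr manager.Manager) (source.SyncingSource, *admission.Decoder) {
	src := source.Kind(mgr.GetCache(), &corev1.Pod{}) // replaces &source.Kind{Type: ...} plus cache injection
	dec := admission.NewDecoder(mgr.GetScheme())      // no second return value anymore
	return src, dec
}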
@@ -62,9 +64,14 @@ func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) er if len(rawObj.Raw) == 0 { return fmt.Errorf("there is no content to decode") } - if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { + if unstructuredInto, isUnstructured := into.(runtime.Unstructured); isUnstructured { // unmarshal into unstructured's underlying object to avoid calling the decoder - return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object) + var object map[string]interface{} + if err := json.Unmarshal(rawObj.Raw, &object); err != nil { + return err + } + unstructuredInto.SetUnstructuredContent(object) + return nil } deserializer := d.codecs.UniversalDeserializer() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go index e4e0778f57..a3b7207168 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -33,9 +33,9 @@ type Defaulter interface { } // DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. -func DefaultingWebhookFor(defaulter Defaulter) *Webhook { +func DefaultingWebhookFor(scheme *runtime.Scheme, defaulter Defaulter) *Webhook { return &Webhook{ - Handler: &mutatingHandler{defaulter: defaulter}, + Handler: &mutatingHandler{defaulter: defaulter, decoder: NewDecoder(scheme)}, } } @@ -44,16 +44,11 @@ type mutatingHandler struct { decoder *Decoder } -var _ DecoderInjector = &mutatingHandler{} - -// InjectDecoder injects the decoder into a mutatingHandler. -func (h *mutatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.defaulter == nil { panic("defaulter should never be nil") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go index 7007984245..5f697e7dce 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go @@ -34,9 +34,9 @@ type CustomDefaulter interface { } // WithCustomDefaulter creates a new Webhook for a CustomDefaulter interface. -func WithCustomDefaulter(obj runtime.Object, defaulter CustomDefaulter) *Webhook { +func WithCustomDefaulter(scheme *runtime.Scheme, obj runtime.Object, defaulter CustomDefaulter) *Webhook { return &Webhook{ - Handler: &defaulterForType{object: obj, defaulter: defaulter}, + Handler: &defaulterForType{object: obj, defaulter: defaulter, decoder: NewDecoder(scheme)}, } } @@ -46,15 +46,11 @@ type defaulterForType struct { decoder *Decoder } -var _ DecoderInjector = &defaulterForType{} - -func (h *defaulterForType) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. 
func (h *defaulterForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.defaulter == nil { panic("defaulter should never be nil") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go index 0b274dd02b..8dc0cbec6f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go @@ -20,9 +20,3 @@ Package admission provides implementation for admission webhook and methods to i See examples/mutatingwebhook.go and examples/validatingwebhook.go for examples of admission webhooks. */ package admission - -import ( - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" -) - -var log = logf.RuntimeLog.WithName("admission") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 066cc42256..84ab5e75a4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -52,7 +52,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { var reviewResponse Response if r.Body == nil { err = errors.New("request body is empty") - wh.log.Error(err, "bad request") + wh.getLogger(nil).Error(err, "bad request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -60,7 +60,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() if body, err = io.ReadAll(r.Body); err != nil { - wh.log.Error(err, "unable to read the body from the incoming request") + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -69,7 +69,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { // verify the content type is accurate if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { err = fmt.Errorf("contentType=%s, expected application/json", contentType) - wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) + wh.getLogger(nil).Error(err, "unable to process a request with unknown content type") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -88,12 +88,12 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) if err != nil { - wh.log.Error(err, "unable to decode the request") + wh.getLogger(nil).Error(err, "unable to decode the request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return } - wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource) + wh.getLogger(&req).V(4).Info("received request") reviewResponse = wh.Handle(ctx, req) wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) @@ -124,7 +124,7 @@ func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK // writeAdmissionResponse writes ar to w. 
func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { if err := json.NewEncoder(w).Encode(ar); err != nil { - wh.log.Error(err, "unable to encode and write the response") + wh.getLogger(nil).Error(err, "unable to encode and write the response") // Since the `ar v1.AdmissionReview` is a clear and legal object, // it should not have problem to be marshalled into bytes. // The error here is probably caused by the abnormal HTTP connection, @@ -132,15 +132,15 @@ func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { // to avoid endless circular calling. serverError := Errored(http.StatusInternalServerError, err) if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil { - wh.log.Error(err, "still unable to encode and write the InternalServerError response") + wh.getLogger(nil).Error(err, "still unable to encode and write the InternalServerError response") } } else { res := ar.Response - if log := wh.log; log.V(1).Enabled() { + if log := wh.getLogger(nil); log.V(4).Enabled() { if res.Result != nil { - log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason) + log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason, "message", res.Result.Message) } - log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed) + log.V(4).Info("wrote response", "requestID", res.UID, "allowed", res.Allowed) } } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go deleted file mode 100644 index d5af0d598f..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -// DecoderInjector is used by the ControllerManager to inject decoder into webhook handlers. -type DecoderInjector interface { - InjectDecoder(*Decoder) error -} - -// InjectDecoderInto will set decoder on i and return the result if it implements Decoder. Returns -// false if i does not implement Decoder. 
-func InjectDecoderInto(decoder *Decoder, i interface{}) (bool, error) { - if s, ok := i.(DecoderInjector); ok { - return true, s.InjectDecoder(decoder) - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go index 26900cf2eb..2f7820d04b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -25,8 +25,6 @@ import ( jsonpatch "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) type multiMutating []Handler @@ -62,31 +60,6 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response { } } -// InjectFunc injects the field setter into the handlers. -func (hs multiMutating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} - -// InjectDecoder injects the decoder into the handlers. -func (hs multiMutating) InjectDecoder(d *Decoder) error { - for _, handler := range hs { - if _, err := InjectDecoderInto(d, handler); err != nil { - return err - } - } - return nil -} - // MultiMutatingHandler combines multiple mutating webhook handlers into a single // mutating webhook handler. Handlers are called in sequential order, and the first // `allowed: false` response may short-circuit the rest. Users must take care to @@ -120,28 +93,3 @@ func (hs multiValidating) Handle(ctx context.Context, req Request) Response { func MultiValidatingHandler(handlers ...Handler) Handler { return multiValidating(handlers) } - -// InjectFunc injects the field setter into the handlers. -func (hs multiValidating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} - -// InjectDecoder injects the decoder into the handlers. -func (hs multiValidating) InjectDecoder(d *Decoder) error { - for _, handler := range hs { - if _, err := InjectDecoderInto(d, handler); err != nil { - return err - } - } - return nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go index 24ff1dee3c..ec1c88c989 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go @@ -26,21 +26,21 @@ import ( // Allowed constructs a response indicating that the given operation // is allowed (without any patches). -func Allowed(reason string) Response { - return ValidationResponse(true, reason) +func Allowed(message string) Response { + return ValidationResponse(true, message) } // Denied constructs a response indicating that the given operation // is not allowed. 
-func Denied(reason string) Response { - return ValidationResponse(false, reason) +func Denied(message string) Response { + return ValidationResponse(false, message) } // Patched constructs a response indicating that the given operation is // allowed, and that the target object should be modified by the given // JSONPatch operations. -func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { - resp := Allowed(reason) +func Patched(message string, patches ...jsonpatch.JsonPatchOperation) Response { + resp := Allowed(message) resp.Patches = patches return resp @@ -60,21 +60,24 @@ func Errored(code int32, err error) Response { } // ValidationResponse returns a response for admitting a request. -func ValidationResponse(allowed bool, reason string) Response { +func ValidationResponse(allowed bool, message string) Response { code := http.StatusForbidden + reason := metav1.StatusReasonForbidden if allowed { code = http.StatusOK + reason = "" } resp := Response{ AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: allowed, Result: &metav1.Status{ - Code: int32(code), + Code: int32(code), + Reason: reason, }, }, } - if len(reason) > 0 { - resp.Result.Reason = metav1.StatusReason(reason) + if len(message) > 0 { + resp.Result.Message = message } return resp } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go index 4b27e75ede..00bda8a4ce 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -18,7 +18,8 @@ package admission import ( "context" - goerrors "errors" + "errors" + "fmt" "net/http" v1 "k8s.io/api/admission/v1" @@ -26,18 +27,35 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// Warnings represents warning messages. +type Warnings []string + // Validator defines functions for validating an operation. +// The custom resource kind which implements this interface can validate itself. +// To validate the custom resource with another specific struct, use CustomValidator instead. type Validator interface { runtime.Object - ValidateCreate() error - ValidateUpdate(old runtime.Object) error - ValidateDelete() error + + // ValidateCreate validates the object on creation. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateCreate() (warnings Warnings, err error) + + // ValidateUpdate validates the object on update. The oldObj is the object before the update. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateUpdate(old runtime.Object) (warnings Warnings, err error) + + // ValidateDelete validates the object on deletion. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateDelete() (warnings Warnings, err error) } // ValidatingWebhookFor creates a new Webhook for validating the provided type. 
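// Illustrative sketch (assumed downstream usage, not taken from this patch):
// the Validator interface above now returns admission.Warnings alongside the
// error. Widget and its fields are hypothetical; a real implementation must
// also satisfy runtime.Object (DeepCopyObject etc., typically generated).
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

type Widget struct {
	Replicas int
	Size     string // deprecated field, used here only to demonstrate warnings
}

func (w *Widget) ValidateCreate() (admission.Warnings, error) {
	var warnings admission.Warnings
	if w.Size != "" {
		warnings = append(warnings, "spec.size is deprecated, use spec.replicas")
	}
	if w.Replicas < 0 {
		return warnings, fmt.Errorf("spec.replicas must be non-negative")
	}
	return warnings, nil
}

func (w *Widget) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
	return w.ValidateCreate()
}

func (w *Widget) ValidateDelete() (admission.Warnings, error) {
	return nil, nil
}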
-func ValidatingWebhookFor(validator Validator) *Webhook { +func ValidatingWebhookFor(scheme *runtime.Scheme, validator Validator) *Webhook { return &Webhook{ - Handler: &validatingHandler{validator: validator}, + Handler: &validatingHandler{validator: validator, decoder: NewDecoder(scheme)}, } } @@ -46,42 +64,34 @@ type validatingHandler struct { decoder *Decoder } -var _ DecoderInjector = &validatingHandler{} - -// InjectDecoder injects the decoder into a validatingHandler. -func (h *validatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.validator == nil { panic("validator should never be nil") } - // Get the object in the request obj := h.validator.DeepCopyObject().(Validator) - if req.Operation == v1.Create { - err := h.decoder.Decode(req, obj) - if err != nil { - return Errored(http.StatusBadRequest, err) - } - err = obj.ValidateCreate() - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) + var err error + var warnings []string + + switch req.Operation { + case v1.Connect: + // No validation for connect requests. + // TODO(vincepri): Should we validate CONNECT requests? In what cases? + case v1.Create: + if err = h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) } - } - if req.Operation == v1.Update { + warnings, err = obj.ValidateCreate() + case v1.Update: oldObj := obj.DeepCopyObject() - err := h.decoder.DecodeRaw(req.Object, obj) + err = h.decoder.DecodeRaw(req.Object, obj) if err != nil { return Errored(http.StatusBadRequest, err) } @@ -90,33 +100,26 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = obj.ValidateUpdate(oldObj) - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) - } - } - - if req.Operation == v1.Delete { + warnings, err = obj.ValidateUpdate(oldObj) + case v1.Delete: // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 // OldObject contains the object being deleted - err := h.decoder.DecodeRaw(req.OldObject, obj) + err = h.decoder.DecodeRaw(req.OldObject, obj) if err != nil { return Errored(http.StatusBadRequest, err) } - err = obj.ValidateDelete() - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) - } + warnings, err = obj.ValidateDelete() + default: + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation %q", req.Operation)) } - return Allowed("") + if err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()).WithWarnings(warnings...) + } + return Denied(err.Error()).WithWarnings(warnings...) + } + return Allowed("").WithWarnings(warnings...) 
} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go index 33252f1134..e99fbd8a85 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go @@ -28,16 +28,29 @@ import ( ) // CustomValidator defines functions for validating an operation. +// The object to be validated is passed into methods as a parameter. type CustomValidator interface { - ValidateCreate(ctx context.Context, obj runtime.Object) error - ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error - ValidateDelete(ctx context.Context, obj runtime.Object) error + + // ValidateCreate validates the object on creation. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateCreate(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) + + // ValidateUpdate validates the object on update. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings Warnings, err error) + + // ValidateDelete validates the object on deletion. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateDelete(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) } // WithCustomValidator creates a new Webhook for validating the provided type. -func WithCustomValidator(obj runtime.Object, validator CustomValidator) *Webhook { +func WithCustomValidator(scheme *runtime.Scheme, obj runtime.Object, validator CustomValidator) *Webhook { return &Webhook{ - Handler: &validatorForType{object: obj, validator: validator}, + Handler: &validatorForType{object: obj, validator: validator, decoder: NewDecoder(scheme)}, } } @@ -47,16 +60,11 @@ type validatorForType struct { decoder *Decoder } -var _ DecoderInjector = &validatorForType{} - -// InjectDecoder injects the decoder into a validatingHandler. -func (h *validatorForType) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *validatorForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.validator == nil { panic("validator should never be nil") } @@ -70,13 +78,18 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { obj := h.object.DeepCopyObject() var err error + var warnings []string + switch req.Operation { + case v1.Connect: + // No validation for connect requests. + // TODO(vincepri): Should we validate CONNECT requests? In what cases? 
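// Illustrative sketch (assumed downstream usage, not taken from this patch):
// WithCustomValidator now takes the scheme up front (no decoder injection), and
// CustomValidator methods return admission.Warnings. podValidator and its checks
// are hypothetical.
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

type podValidator struct{}

func (v *podValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return nil, fmt.Errorf("expected a Pod, got %T", obj)
	}
	if len(pod.Spec.Containers) == 0 {
		return nil, fmt.Errorf("pod must declare at least one container")
	}
	return nil, nil
}

func (v *podValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
	return v.ValidateCreate(ctx, newObj)
}

func (v *podValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
	return nil, nil
}

func newPodValidatingWebhook(mgr manager.Manager) *admission.Webhook {
	return admission.WithCustomValidator(mgr.GetScheme(), &corev1.Pod{}, &podValidator{})
}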
case v1.Create: if err := h.decoder.Decode(req, obj); err != nil { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateCreate(ctx, obj) + warnings, err = h.validator.ValidateCreate(ctx, obj) case v1.Update: oldObj := obj.DeepCopyObject() if err := h.decoder.DecodeRaw(req.Object, obj); err != nil { @@ -86,7 +99,7 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateUpdate(ctx, oldObj, obj) + warnings, err = h.validator.ValidateUpdate(ctx, oldObj, obj) case v1.Delete: // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 // OldObject contains the object being deleted @@ -94,20 +107,20 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateDelete(ctx, obj) + warnings, err = h.validator.ValidateDelete(ctx, obj) default: - return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation request %q", req.Operation)) + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation %q", req.Operation)) } // Check the error message first. if err != nil { var apiStatus apierrors.APIStatus if errors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) + return validationResponseFromStatus(false, apiStatus.Status()).WithWarnings(warnings...) } - return Denied(err.Error()) + return Denied(err.Error()).WithWarnings(warnings...) } // Return allowed if everything succeeded. - return Allowed("") + return Allowed("").WithWarnings(warnings...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index d10b97dddb..f1767f31b2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -21,18 +21,17 @@ import ( "errors" "fmt" "net/http" + "sync" "github.com/go-logr/logr" - jsonpatch "gomodules.xyz/jsonpatch/v2" + "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog/v2" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) @@ -131,16 +130,14 @@ type Webhook struct { // headers thus allowing you to read them from within the handler WithContextFunc func(context.Context, *http.Request) context.Context - // decoder is constructed on receiving a scheme and passed down to then handler - decoder *Decoder + // LogConstructor is used to construct a logger for logging messages during webhook calls + // based on the given base logger (which might carry more values like the webhook's path). + // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of requests. + LogConstructor func(base logr.Logger, req *Request) logr.Logger - log logr.Logger -} - -// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. 
-func (wh *Webhook) InjectLogger(l logr.Logger) error { - wh.log = l - return nil + setupLogOnce sync.Once + log logr.Logger } // WithRecoverPanic takes a bool flag which indicates whether the panic caused by webhook should be recovered. @@ -166,79 +163,47 @@ func (wh *Webhook) Handle(ctx context.Context, req Request) (response Response) }() } + reqLog := wh.getLogger(&req) + ctx = logf.IntoContext(ctx, reqLog) + resp := wh.Handler.Handle(ctx, req) if err := resp.Complete(req); err != nil { - wh.log.Error(err, "unable to encode response") + reqLog.Error(err, "unable to encode response") return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) } return resp } -// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. -func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { - // TODO(directxman12): we should have a better way to pass this down - - var err error - wh.decoder, err = NewDecoder(s) - if err != nil { - return err - } - - // inject the decoder here too, just in case the order of calling this is not - // scheme first, then inject func - if wh.Handler != nil { - if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil { - return err +// getLogger constructs a logger from the injected log and LogConstructor. +func (wh *Webhook) getLogger(req *Request) logr.Logger { + wh.setupLogOnce.Do(func() { + if wh.log.GetSink() == nil { + wh.log = logf.Log.WithName("admission") } - } - - return nil -} + }) -// GetDecoder returns a decoder to decode the objects embedded in admission requests. -// It may be nil if we haven't received a scheme to use to determine object types yet. -func (wh *Webhook) GetDecoder() *Decoder { - return wh.decoder + logConstructor := wh.LogConstructor + if logConstructor == nil { + logConstructor = DefaultLogConstructor + } + return logConstructor(wh.log, req) } -// InjectFunc injects the field setter into the webhook. -func (wh *Webhook) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - - // also inject a decoder, and wrap this so that we get a setFields - // that injects a decoder (hopefully things don't ignore the duplicate - // InjectorInto call). - - var setFields inject.Func - setFields = func(target interface{}) error { - if err := f(target); err != nil { - return err - } - - if _, err := inject.InjectorInto(setFields, target); err != nil { - return err - } - - if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil { - return err - } - - return nil +// DefaultLogConstructor adds some commonly interesting fields to the given logger. +func DefaultLogConstructor(base logr.Logger, req *Request) logr.Logger { + if req != nil { + return base.WithValues("object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + "resource", req.Resource, "user", req.UserInfo.Username, + "requestID", req.UID, + ) } - - return setFields(wh.Handler) + return base } // StandaloneOptions let you configure a StandaloneWebhook. type StandaloneOptions struct { - // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources - // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better - // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. 
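// Illustrative sketch (assumed downstream usage, not taken from this patch):
// InjectLogger is gone; per-request log fields are supplied through the exported
// LogConstructor field, with DefaultLogConstructor used when it is nil. The extra
// key/value pairs chosen below are assumptions.
package example

import (
	"github.com/go-logr/logr"

	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

func withRequestLogging(wh *admission.Webhook) *admission.Webhook {
	wh.LogConstructor = func(base logr.Logger, req *admission.Request) logr.Logger {
		if req == nil {
			return base // the constructor must tolerate nil requests
		}
		return base.WithValues("operation", req.Operation, "requestID", req.UID)
	}
	return wh
}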
- Scheme *runtime.Scheme // Logger to be used by the webhook. // If none is set, it defaults to log.Log global logger. Logger logr.Logger @@ -258,19 +223,9 @@ type StandaloneOptions struct { // in your own server/mux. In order to be accessed by a kubernetes cluster, // all webhook servers require TLS. func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) { - if opts.Scheme == nil { - opts.Scheme = scheme.Scheme - } - - if err := hook.InjectScheme(opts.Scheme); err != nil { - return nil, err + if opts.Logger.GetSink() != nil { + hook.log = opts.Logger } - - if opts.Logger.GetSink() == nil { - opts.Logger = logf.RuntimeLog.WithName("webhook") - } - hook.log = opts.Logger - if opts.MetricsPath == "" { return hook, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index 99c863264b..23d5bf4350 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -29,12 +29,9 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/runtime" - kscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/certwatcher" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) @@ -49,7 +46,29 @@ var DefaultPort = 9443 // at the default locations (tls.crt and tls.key). If you do not // want to configure TLS (i.e for testing purposes) run an // admission.StandaloneWebhook in your own server. -type Server struct { +type Server interface { + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates + // the webhook server doesn't need leader election. + NeedLeaderElection() bool + + // Register marks the given webhook as being served at the given path. + // It panics if two hooks are registered on the same path. + Register(path string, hook http.Handler) + + // Start runs the server. + // It will install the webhook related resources depend on the server configuration. + Start(ctx context.Context) error + + // StartedChecker returns an healthz.Checker which is healthy after the + // server has been started. + StartedChecker() healthz.Checker + + // WebhookMux returns the servers WebhookMux + WebhookMux() *http.ServeMux +} + +// Options are all the available options for a webhook.Server +type Options struct { // Host is the address that the server will listen on. // Defaults to "" - all addresses. Host string @@ -63,9 +82,13 @@ type Server struct { CertDir string // CertName is the server certificate name. Defaults to tls.crt. + // + // Note: This option should only be set when TLSOpts does not override GetCertificate. CertName string // KeyName is the server key name. Defaults to tls.key. + // + // Note: This option should only be set when TLSOpts does not override GetCertificate. KeyName string // ClientCAName is the CA certificate name which server used to verify remote(client)'s certificate. @@ -82,13 +105,21 @@ type Server struct { // WebhookMux is the multiplexer that handles different webhooks. WebhookMux *http.ServeMux +} - // webhooks keep track of all registered webhooks for dependency injection, - // and to provide better panic messages on duplicate webhook registration. - webhooks map[string]http.Handler +// NewServer constructs a new Server from the provided options. 
+func NewServer(o Options) Server { + return &DefaultServer{ + Options: o, + } +} - // setFields allows injecting dependencies from an external source - setFields inject.Func +// DefaultServer is the default implementation used for Server. +type DefaultServer struct { + Options Options + + // webhooks keep track of all registered webhooks + webhooks map[string]http.Handler // defaultingOnce ensures that the default fields are only ever set once. defaultingOnce sync.Once @@ -99,41 +130,49 @@ type Server struct { // mu protects access to the webhook map & setFields for Start, Register, etc mu sync.Mutex + + webhookMux *http.ServeMux } // setDefaults does defaulting for the Server. -func (s *Server) setDefaults() { - s.webhooks = map[string]http.Handler{} - if s.WebhookMux == nil { - s.WebhookMux = http.NewServeMux() +func (o *Options) setDefaults() { + if o.WebhookMux == nil { + o.WebhookMux = http.NewServeMux() } - if s.Port <= 0 { - s.Port = DefaultPort + if o.Port <= 0 { + o.Port = DefaultPort } - if len(s.CertDir) == 0 { - s.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") + if len(o.CertDir) == 0 { + o.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") } - if len(s.CertName) == 0 { - s.CertName = "tls.crt" + if len(o.CertName) == 0 { + o.CertName = "tls.crt" } - if len(s.KeyName) == 0 { - s.KeyName = "tls.key" + if len(o.KeyName) == 0 { + o.KeyName = "tls.key" } } +func (s *DefaultServer) setDefaults() { + s.webhooks = map[string]http.Handler{} + s.Options.setDefaults() + + s.webhookMux = s.Options.WebhookMux +} + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates // the webhook server doesn't need leader election. -func (*Server) NeedLeaderElection() bool { +func (*DefaultServer) NeedLeaderElection() bool { return false } // Register marks the given webhook as being served at the given path. // It panics if two hooks are registered on the same path. -func (s *Server) Register(path string, hook http.Handler) { +func (s *DefaultServer) Register(path string, hook http.Handler) { s.mu.Lock() defer s.mu.Unlock() @@ -141,51 +180,11 @@ func (s *Server) Register(path string, hook http.Handler) { if _, found := s.webhooks[path]; found { panic(fmt.Errorf("can't register duplicate path: %v", path)) } - // TODO(directxman12): call setfields if we've already started the server s.webhooks[path] = hook - s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) + s.webhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) regLog := log.WithValues("path", path) regLog.Info("Registering webhook") - - // we've already been "started", inject dependencies here. - // Otherwise, InjectFunc will do this for us later. - if s.setFields != nil { - if err := s.setFields(hook); err != nil { - // TODO(directxman12): swallowing this error isn't great, but we'd have to - // change the signature to fix that - regLog.Error(err, "unable to inject fields into webhook during registration") - } - - baseHookLog := log.WithName("webhooks") - - // NB(directxman12): we don't propagate this further by wrapping setFields because it's - // unclear if this is how we want to deal with log propagation. In this specific instance, - // we want to be able to pass a logger to webhooks because they don't know their own path. 
- if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", path), hook); err != nil { - regLog.Error(err, "unable to logger into webhook during registration") - } - } -} - -// StartStandalone runs a webhook server without -// a controller manager. -func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) error { - // Use the Kubernetes client-go scheme if none is specified - if scheme == nil { - scheme = kscheme.Scheme - } - - if err := s.InjectFunc(func(i interface{}) error { - if _, err := inject.SchemeInto(scheme, i); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return s.Start(ctx) } // tlsVersion converts from human-readable TLS version (for example "1.1") @@ -210,41 +209,49 @@ func tlsVersion(version string) (uint16, error) { // Start runs the server. // It will install the webhook related resources depend on the server configuration. -func (s *Server) Start(ctx context.Context) error { +func (s *DefaultServer) Start(ctx context.Context) error { s.defaultingOnce.Do(s.setDefaults) baseHookLog := log.WithName("webhooks") baseHookLog.Info("Starting webhook server") - certPath := filepath.Join(s.CertDir, s.CertName) - keyPath := filepath.Join(s.CertDir, s.KeyName) - - certWatcher, err := certwatcher.New(certPath, keyPath) + tlsMinVersion, err := tlsVersion(s.Options.TLSMinVersion) if err != nil { return err } - go func() { - if err := certWatcher.Start(ctx); err != nil { - log.Error(err, "certificate watcher error") - } - }() - - tlsMinVersion, err := tlsVersion(s.TLSMinVersion) - if err != nil { - return err + cfg := &tls.Config{ //nolint:gosec + NextProtos: []string{"h2"}, + MinVersion: tlsMinVersion, + } + // fallback TLS config ready, will now mutate if passer wants full control over it + for _, op := range s.Options.TLSOpts { + op(cfg) } - cfg := &tls.Config{ //nolint:gosec - NextProtos: []string{"h2"}, - GetCertificate: certWatcher.GetCertificate, - MinVersion: tlsMinVersion, + if cfg.GetCertificate == nil { + certPath := filepath.Join(s.Options.CertDir, s.Options.CertName) + keyPath := filepath.Join(s.Options.CertDir, s.Options.KeyName) + + // Create the certificate watcher and + // set the config's GetCertificate on the TLSConfig + certWatcher, err := certwatcher.New(certPath, keyPath) + if err != nil { + return err + } + cfg.GetCertificate = certWatcher.GetCertificate + + go func() { + if err := certWatcher.Start(ctx); err != nil { + log.Error(err, "certificate watcher error") + } + }() } - // load CA to verify client certificate - if s.ClientCAName != "" { + // Load CA to verify client certificate, if configured. 
+ if s.Options.ClientCAName != "" { certPool := x509.NewCertPool() - clientCABytes, err := os.ReadFile(filepath.Join(s.CertDir, s.ClientCAName)) + clientCABytes, err := os.ReadFile(filepath.Join(s.Options.CertDir, s.Options.ClientCAName)) if err != nil { return fmt.Errorf("failed to read client CA cert: %w", err) } @@ -258,27 +265,23 @@ func (s *Server) Start(ctx context.Context) error { cfg.ClientAuth = tls.RequireAndVerifyClientCert } - // fallback TLS config ready, will now mutate if passer wants full control over it - for _, op := range s.TLSOpts { - op(cfg) - } - - listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg) + listener, err := tls.Listen("tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), cfg) if err != nil { return err } - log.Info("Serving webhook server", "host", s.Host, "port", s.Port) + log.Info("Serving webhook server", "host", s.Options.Host, "port", s.Options.Port) - srv := httpserver.New(s.WebhookMux) + srv := httpserver.New(s.webhookMux) idleConnsClosed := make(chan struct{}) go func() { <-ctx.Done() - log.Info("shutting down webhook server") + log.Info("Shutting down webhook server with timeout of 1 minute") - // TODO: use a context with reasonable timeout - if err := srv.Shutdown(context.Background()); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + if err := srv.Shutdown(ctx); err != nil { // Error from closing listeners, or context timeout log.Error(err, "error shutting down the HTTP server") } @@ -298,7 +301,7 @@ func (s *Server) Start(ctx context.Context) error { // StartedChecker returns an healthz.Checker which is healthy after the // server has been started. -func (s *Server) StartedChecker() healthz.Checker { +func (s *DefaultServer) StartedChecker() healthz.Checker { config := &tls.Config{ InsecureSkipVerify: true, //nolint:gosec // config is used to connect to our own webhook port. } @@ -311,7 +314,7 @@ func (s *Server) StartedChecker() healthz.Checker { } d := &net.Dialer{Timeout: 10 * time.Second} - conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config) + conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), config) if err != nil { return fmt.Errorf("webhook server is not reachable: %w", err) } @@ -324,23 +327,7 @@ func (s *Server) StartedChecker() healthz.Checker { } } -// InjectFunc injects the field setter into the server. -func (s *Server) InjectFunc(f inject.Func) error { - s.setFields = f - - // inject fields here that weren't injected in Register because we didn't have setFields yet. - baseHookLog := log.WithName("webhooks") - for hookPath, webhook := range s.webhooks { - if err := s.setFields(webhook); err != nil { - return err - } - - // NB(directxman12): we don't propagate this further by wrapping setFields because it's - // unclear if this is how we want to deal with log propagation. In this specific instance, - // we want to be able to pass a logger to webhooks because they don't know their own path. 
- if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil { - return err - } - } - return nil +// WebhookMux returns the servers WebhookMux +func (s *DefaultServer) WebhookMux() *http.ServeMux { + return s.webhookMux } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go new file mode 100644 index 0000000000..75a492d8ea --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Conflict is a conflict on a specific field with the current manager of +// that field. It does implement the error interface so that it can be +// used as an error. +type Conflict struct { + Manager string + Path fieldpath.Path +} + +// Conflict is an error. +var _ error = Conflict{} + +// Error formats the conflict as an error. +func (c Conflict) Error() string { + return fmt.Sprintf("conflict with %q: %v", c.Manager, c.Path) +} + +// Equals returns true if c == c2 +func (c Conflict) Equals(c2 Conflict) bool { + if c.Manager != c2.Manager { + return false + } + return c.Path.Equals(c2.Path) +} + +// Conflicts accumulates multiple conflicts and aggregates them by managers. +type Conflicts []Conflict + +var _ error = Conflicts{} + +// Error prints the list of conflicts, grouped by sorted managers. +func (conflicts Conflicts) Error() string { + if len(conflicts) == 1 { + return conflicts[0].Error() + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + managers := []string{} + for manager := range m { + managers = append(managers, manager) + } + + // Print conflicts by sorted managers. + sort.Strings(managers) + + messages := []string{} + for _, manager := range managers { + messages = append(messages, fmt.Sprintf("conflicts with %q:", manager)) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return strings.Join(messages, "\n") +} + +// Equals returns true if the lists of conflicts are the same. +func (c Conflicts) Equals(c2 Conflicts) bool { + if len(c) != len(c2) { + return false + } + for i := range c { + if !c[i].Equals(c2[i]) { + return false + } + } + return true +} + +// ToSet aggregates conflicts for all managers into a single Set. +func (c Conflicts) ToSet() *fieldpath.Set { + set := fieldpath.NewSet() + for _, conflict := range []Conflict(c) { + set.Insert(conflict.Path) + } + return set +} + +// ConflictsFromManagers creates a list of conflicts given Managers sets. 
+func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { + conflicts := []Conflict{} + + for manager, set := range sets { + set.Set().Iterate(func(p fieldpath.Path) { + conflicts = append(conflicts, Conflict{ + Manager: manager, + Path: p, + }) + }) + } + + return conflicts +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go new file mode 100644 index 0000000000..1b23dcbd5e --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -0,0 +1,356 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// Converter is an interface to the conversion logic. The converter +// needs to be able to convert objects from one version to another. +type Converter interface { + Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) + IsMissingVersionError(error) bool +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + Converter Converter + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + enableUnions bool +} + +// EnableUnionFeature turns on union handling. It is disabled by default until the +// feature is complete. 
+func (s *Updater) EnableUnionFeature() { + s.enableUnions = true +} + +func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { + conflicts := fieldpath.ManagedFields{} + removed := fieldpath.ManagedFields{} + compare, err := oldObject.Compare(newObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + + versions := map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.ExcludeFields(s.IgnoredFields[version]), + } + + for manager, managerSet := range managers { + if manager == workflow { + continue + } + compare, ok := versions[managerSet.APIVersion()] + if !ok { + var err error + versionedOldObject, err := s.Converter.Convert(oldObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert old object: %v", err) + } + versionedNewObject, err := s.Converter.Convert(newObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert new object: %v", err) + } + compare, err = versionedOldObject.Compare(versionedNewObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + } + + conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added)) + if !conflictSet.Empty() { + conflicts[manager] = fieldpath.NewVersionedSet(conflictSet, managerSet.APIVersion(), false) + } + + if !compare.Removed.Empty() { + removed[manager] = fieldpath.NewVersionedSet(compare.Removed, managerSet.APIVersion(), false) + } + } + + if !force && len(conflicts) != 0 { + return nil, nil, ConflictsFromManagers(conflicts) + } + + for manager, conflictSet := range conflicts { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(conflictSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager, removedSet := range removed { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(removedSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager := range managers { + if managers[manager].Set().Empty() { + delete(managers, manager) + } + } + + return managers, compare, nil +} + +// Update is the method you should call once you've merged your final +// object on CREATE/UPDATE/PATCH verbs. newObject must be the object +// that you intend to persist (after applying the patch if this is for a +// PATCH call), and liveObject must be the original object (empty if +// this is a CREATE call). 
+func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + newObject, err = liveObject.NormalizeUnions(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if _, ok := managers[manager]; !ok { + managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false) + } + + ignored := s.IgnoredFields[version] + if ignored == nil { + ignored = fieldpath.NewSet() + } + managers[manager] = fieldpath.NewVersionedSet( + managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + version, + false, + ) + if managers[manager].Set().Empty() { + delete(managers, manager) + } + return newObject, managers, nil +} + +// Apply should be called when Apply is run, given the current object as +// well as the configuration that is applied. This will merge the object +// and return it. If the object hasn't changed, nil is returned (the +// managers can still have changed though). +func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if s.enableUnions { + configObject, err = configObject.NormalizeUnionsApply(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + newObject, err := liveObject.Merge(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) + } + if s.enableUnions { + newObject, err = configObject.NormalizeUnionsApply(newObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + } + lastSet := managers[manager] + set, err := configObject.ToFieldSet() + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err) + } + + ignored := s.IgnoredFields[version] + if ignored != nil { + set = set.RecursiveDifference(ignored) + // TODO: is this correct. If we don't remove from lastSet pruning might remove the fields? 
+ if lastSet != nil { + lastSet.Set().RecursiveDifference(ignored) + } + } + managers[manager] = fieldpath.NewVersionedSet(set, version, true) + newObject, err = s.prune(newObject, managers, manager, lastSet) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if compare.IsSame() { + newObject = nil + } + return newObject, managers, nil +} + +// prune will remove a field, list or map item, iff: +// * applyingManager applied it last time +// * applyingManager didn't apply it this time +// * no other applier claims to manage it +func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFields, applyingManager string, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + if lastSet == nil || lastSet.Set().Empty() { + return merged, nil + } + convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert merged object to last applied version: %v", err) + } + + sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef() + pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr)) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + if err != nil { + return nil, fmt.Errorf("failed add back owned items: %v", err) + } + pruned, err = s.addBackDanglingItems(convertedMerged, pruned, lastSet) + if err != nil { + return nil, fmt.Errorf("failed add back dangling items: %v", err) + } + return s.Converter.Convert(pruned, managers[applyingManager].APIVersion()) +} + +// addBackOwnedItems adds back any fields, list and map items that were removed by prune, +// but other appliers or updaters (or the current applier's new config) claim to own. +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { + var err error + managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} + for _, managerSet := range managedFields { + if _, ok := managedAtVersion[managerSet.APIVersion()]; !ok { + managedAtVersion[managerSet.APIVersion()] = fieldpath.NewSet() + } + managedAtVersion[managerSet.APIVersion()] = managedAtVersion[managerSet.APIVersion()].Union(managerSet.Set()) + } + // Add back owned items at pruned version first to avoid conversion failure + // caused by pruned fields which are required for conversion. + prunedVersion := fieldpath.APIVersion(*pruned.TypeRef().NamedType) + if managed, ok := managedAtVersion[prunedVersion]; ok { + merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed) + if err != nil { + return nil, err + } + delete(managedAtVersion, prunedVersion) + } + for version, managed := range managedAtVersion { + merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, version, managed) + if err != nil { + return nil, err + } + } + return pruned, nil +} + +// addBackOwnedItemsForVersion adds back any fields, list and map items that were removed by prune with specific managed field path at a version. +// It is an extracted sub-function from addBackOwnedItems for code reuse. 
+func (s *Updater) addBackOwnedItemsForVersion(merged, pruned *typed.TypedValue, version fieldpath.APIVersion, managed *fieldpath.Set) (*typed.TypedValue, *typed.TypedValue, error) { + var err error + merged, err = s.Converter.Convert(merged, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, pruned, nil + } + return nil, nil, fmt.Errorf("failed to convert merged object at version %v: %v", version, err) + } + pruned, err = s.Converter.Convert(pruned, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, pruned, nil + } + return nil, nil, fmt.Errorf("failed to convert pruned object at version %v: %v", version, err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, nil, fmt.Errorf("failed to create field set from merged object at version %v: %v", version, err) + } + prunedSet, err := pruned.ToFieldSet() + if err != nil { + return nil, nil, fmt.Errorf("failed to create field set from pruned object at version %v: %v", version, err) + } + sc, tr := merged.Schema(), merged.TypeRef() + pruned = merged.RemoveItems(mergedSet.EnsureNamedFieldsAreMembers(sc, tr).Difference(prunedSet.EnsureNamedFieldsAreMembers(sc, tr).Union(managed.EnsureNamedFieldsAreMembers(sc, tr)))) + return merged, pruned, nil +} + +// addBackDanglingItems makes sure that the fields list and map items removed by prune were +// previously owned by the currently applying manager. This will add back fields list and map items +// that are unowned or that are owned by Updaters and shouldn't be removed. +func (s *Updater) addBackDanglingItems(merged, pruned *typed.TypedValue, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + convertedPruned, err := s.Converter.Convert(pruned, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert pruned object to last applied version: %v", err) + } + prunedSet, err := convertedPruned.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from pruned object in last applied version: %v", err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from merged object in last applied version: %v", err) + } + sc, tr := merged.Schema(), merged.TypeRef() + prunedSet = prunedSet.EnsureNamedFieldsAreMembers(sc, tr) + mergedSet = mergedSet.EnsureNamedFieldsAreMembers(sc, tr) + last := lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr) + return merged.RemoveItems(mergedSet.Difference(prunedSet).Intersection(last)), nil +} + +// reconcileManagedFieldsWithSchemaChanges reconciles the managed fields with any changes to the +// object's schema since the managed fields were written. 
+// +// Supports: +// - changing types from atomic to granular +// - changing types from granular to atomic +func (s *Updater) reconcileManagedFieldsWithSchemaChanges(liveObject *typed.TypedValue, managers fieldpath.ManagedFields) (fieldpath.ManagedFields, error) { + result := fieldpath.ManagedFields{} + for manager, versionedSet := range managers { + tv, err := s.Converter.Convert(liveObject, versionedSet.APIVersion()) + if s.Converter.IsMissingVersionError(err) { // okay to skip, obsolete versions will be deleted automatically anyway + continue + } + if err != nil { + return nil, err + } + reconciled, err := typed.ReconcileFieldSetWithSchema(versionedSet.Set(), tv) + if err != nil { + return nil, err + } + if reconciled != nil { + result[manager] = fieldpath.NewVersionedSet(reconciled, versionedSet.APIVersion(), versionedSet.Applied()) + } else { + result[manager] = versionedSet + } + } + return result, nil +}
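
For reference, a minimal sketch (not part of this patch) of how a caller constructs the webhook server after this controller-runtime change: the old field-based `webhook.Server` struct and the `Inject*` hooks are gone, and the server is now built from `webhook.Options` via `webhook.NewServer`, which returns the `Server` interface backed by `DefaultServer`. The port, certificate directory, handler path, and the trivial `http.HandlerFunc` below are illustrative assumptions only (a real admission endpoint would register an `admission.Webhook`, typically via a manager), and `Start` assumes serving certs already exist under `CertDir`.

```go
package main

import (
	"context"
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func main() {
	// Build the server from Options instead of setting fields on webhook.Server
	// directly; NewServer returns the Server interface backed by DefaultServer.
	srv := webhook.NewServer(webhook.Options{
		Port:    9443,                                      // illustrative; defaults to 9443 when unset
		CertDir: "/tmp/k8s-webhook-server/serving-certs",   // assumes tls.crt/tls.key already exist here
	})

	// Register panics if two hooks are registered on the same path.
	// The path and handler are placeholders for a real admission.Webhook.
	srv.Register("/validate", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))

	// Start blocks until the context is cancelled, then shuts the HTTP server
	// down with the one-minute timeout introduced in this version.
	if err := srv.Start(context.Background()); err != nil {
		panic(err)
	}
}
```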